diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index 9cb6747580c..165e4892ed9 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -45,9 +45,9 @@
 of: [`./hack/update-codegen.sh`](./hack/update-codegen.sh). Inputs include:
 
   - Prow configs templates
-    [prow/config_knative.yaml](./prow/config_knative.yaml).
+    [prow/jobs_config](./prow/jobs_config).
 
-  - Prow configs generator [tools/config-generator](./tools/config-generator).
+  - Prow configs generator [tools/configgen](./tools/configgen).
 
 - **If you change a package's deps** (including adding an external dependency),
   then you must run [`./hack/update-deps.sh`](./hack/update-deps.sh).
diff --git a/config/Makefile b/config/Makefile
index 51f1a65f6b6..e3a6a348d48 100644
--- a/config/Makefile
+++ b/config/Makefile
@@ -23,9 +23,6 @@
 PROW_GCS ?= knative-prow
 PROW_HOST ?= https://prow.knative.dev
 TESTGRID_GCS ?= knative-testgrid
-KNATIVE_CONFIG ?= config_knative.yaml
-TESTGRID_CONFIG ?= prow/testgrid/testgrid.yaml
-
 CLUSTER ?= prow
 BUILD_CLUSTER ?= knative-prow-build-cluster
 ZONE ?= us-central1-f
diff --git a/config/prow/k8s-testgrid/k8s-testgrid.yaml b/config/prow/k8s-testgrid/k8s-testgrid.yaml
index 1ea4a1c42c5..0926ea446d4 100644
--- a/config/prow/k8s-testgrid/k8s-testgrid.yaml
+++ b/config/prow/k8s-testgrid/k8s-testgrid.yaml
@@ -1,16 +1,3 @@
-# Copyright 2020 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
 # #######################################################################
 # ####                                                               ####
 # ####        THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.     ####
@@ -19,18 +6,60 @@
 # #######################################################################
 # Dashboards need to be specified here to be created on TestGrid
 # A prow annotation will be invalid if it references a dashboard that doesn't exist
+dashboard_groups:
+- dashboard_names:
+  - client
+  - client-pkg
+  - docs
+  - eventing
+  - operator
+  - serving
+  - utilities
+  name: knative
+- dashboard_names:
+  - async-component
+  - container-freezer
+  - eventing-autoscaler-keda
+  - eventing-awssqs
+  - eventing-ceph
+  - eventing-couchdb
+  - eventing-github
+  - eventing-gitlab
+  - eventing-kafka
+  - eventing-kafka-broker
+  - eventing-kogito
+  - eventing-natss
+  - eventing-rabbitmq
+  - eventing-redis
+  - kn-plugin-admin
+  - kn-plugin-diag
+  - kn-plugin-event
+  - kn-plugin-func
+  - kn-plugin-migration
+  - kn-plugin-operator
+  - kn-plugin-quickstart
+  - kn-plugin-sample
+  - kn-plugin-service-log
+  - kn-plugin-source-kafka
+  - kn-plugin-source-kamelet
+  - net-certmanager
+  - net-contour
+  - net-gateway-api
+  - net-http01
+  - net-istio
+  - net-kourier
+  - sample-controller
+  - sample-source
+  name: knative-sandbox
 dashboards:
 - name: async-component
-- name: caching
 - name: client
 - name: client-pkg
 - name: container-freezer
-- name: discovery
 - name: docs
 - name: eventing
 - name: eventing-autoscaler-keda
 - name: eventing-awssqs
-- name: eventing-camel
 - name: eventing-ceph
 - name: eventing-couchdb
 - name: eventing-github
@@ -39,7 +68,6 @@ dashboards:
 - name: eventing-gitlab
 - name: eventing-kafka
 - name: eventing-kafka-broker
 - name: eventing-kogito
 - name: eventing-natss
-- name: eventing-prometheus
 - name: eventing-rabbitmq
 - name: eventing-redis
 - name: kn-plugin-admin
@@ -53,19 +81,14 @@ dashboards:
 - name: kn-plugin-service-log
 - name: kn-plugin-source-kafka
 - name: kn-plugin-source-kamelet
-- name: knative-0.26
-- name: knative-1.0
-- name: knative-1.1
-- name: knative-1.2
-- name: knative-1.3
-- name: knative-gcp
-- name: knative-sandbox-0.25
-- name: knative-sandbox-0.26
-- name: knative-sandbox-1.0
-- name: knative-sandbox-1.1
-- name: knative-sandbox-1.2
-- name: knative-sandbox-1.3
-- name: kperf
+- name: knative-release-1.0
+- name: knative-release-1.1
+- name: knative-release-1.2
+- name: knative-release-1.3
+- name: knative-sandbox-release-1.0
+- name: knative-sandbox-release-1.1
+- name: knative-sandbox-release-1.2
+- name: knative-sandbox-release-1.3
 - name: net-certmanager
 - name: net-contour
 - name: net-gateway-api
@@ -73,67 +96,7 @@
 - name: net-http01
 - name: net-istio
 - name: net-kourier
 - name: operator
-- name: pkg
 - name: sample-controller
 - name: sample-source
 - name: serving
-- name: test-infra
 - name: utilities
-dashboard_groups:
-  - name: google
-    dashboard_names:
-      - "knative-gcp"
-  - name: knative
-    dashboard_names:
-      - "caching"
-      - "client"
-      - "client-pkg"
-      - "docs"
-      - "eventing"
-      - "operator"
-      - "pkg"
-      - "serving"
-      - "test-infra"
-  - name: knative-sandbox
-    dashboard_names:
-      - "async-component"
-      - "container-freezer"
-      - "discovery"
-      - "eventing-autoscaler-keda"
-      - "eventing-awssqs"
-      - "eventing-camel"
-      - "eventing-ceph"
-      - "eventing-couchdb"
-      - "eventing-github"
-      - "eventing-gitlab"
-      - "eventing-kafka"
-      - "eventing-kafka-broker"
-      - "eventing-kogito"
-      - "eventing-natss"
-      - "eventing-prometheus"
-      - "eventing-rabbitmq"
-      - "eventing-redis"
-      - "kn-plugin-admin"
-      - "kn-plugin-diag"
-      - "kn-plugin-event"
-      - "kn-plugin-func"
-      - "kn-plugin-migration"
-      - "kn-plugin-operator"
-      - "kn-plugin-quickstart"
-      - "kn-plugin-sample"
-      - "kn-plugin-service-log"
-      - "kn-plugin-source-kafka"
-      - "kn-plugin-source-kamelet"
"kn-plugin-source-kamelet" - - "kperf" - - "net-certmanager" - - "net-contour" - - "net-gateway-api" - - "net-http01" - - "net-istio" - - "net-kourier" - - "sample-controller" - - "sample-source" - - name: maintenance - dashboard_names: - - "utilities" - - name: prow-tests diff --git a/config/prow/testgrid/testgrid.yaml b/config/prow/testgrid/testgrid.yaml deleted file mode 100644 index 898276426a3..00000000000 --- a/config/prow/testgrid/testgrid.yaml +++ /dev/null @@ -1,3130 +0,0 @@ -# Copyright 2020 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ####################################################################### -# #### #### -# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### -# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### -# #### #### -# ####################################################################### -# Default testgroup and dashboardtab, please do not change them -default_test_group: - days_of_results: 14 # Number of days of test results to gather and serve - tests_name_policy: 2 # Replace the name of the test - ignore_pending: false # Show in-progress tests - column_header: - - configuration_value: Commit # Shows the commit number on column header - - configuration_value: infra-commit - num_columns_recent: 10 # The number of columns to consider "recent" for a variety of purposes - use_kubernetes_client: true # ** This field is deprecated and should always be true ** - is_external: true # ** This field is deprecated and should always be true ** - alert_stale_results_hours: 26 # Alert if tests haven't run for a day (1 day + 2h) - num_passes_to_disable_alert: 1 # Consider a failing test passing if it has 1 or more consecutive passes -default_dashboard_tab: - open_test_template: # The URL template to visit after clicking on a cell - url: https://prow.knative.dev/view/gcs// - file_bug_template: # The URL template to visit when filing a bug - url: https://github.com/knative/serving/issues/new - options: - - key: title - value: "Test \"\" failed" - - key: body - value: - attach_bug_template: # The URL template to visit when attaching a bug - url: # Empty - options: # Empty - # Text to show in the about menu as a link to another view of the results - results_text: See these results on Prow - results_url_template: # The URL template to visit after clicking - url: https://prow.knative.dev/job-history/ - # URL for regression search links. - code_search_path: github.com/knative/serving/search - num_columns_recent: 10 - code_search_url_template: # The URL template to visit when searching for changelists - url: https://github.com/knative/serving/compare/... 
-  num_failures_to_alert: 0
-  num_passes_to_disable_alert: 1
-test_groups:
-- name: ci-knative-serving-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-serving-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-istio-latest-mesh
-  gcs_prefix: knative-prow/logs/ci-knative-serving-istio-latest-mesh
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-istio-latest-no-mesh
-  gcs_prefix: knative-prow/logs/ci-knative-serving-istio-latest-no-mesh
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-istio-head-mesh
-  gcs_prefix: knative-prow/logs/ci-knative-serving-istio-head-mesh
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-istio-head-no-mesh
-  gcs_prefix: knative-prow/logs/ci-knative-serving-istio-head-no-mesh
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-kourier-stable
-  gcs_prefix: knative-prow/logs/ci-knative-serving-kourier-stable
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-contour-latest
-  gcs_prefix: knative-prow/logs/ci-knative-serving-contour-latest
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-gateway-api-latest
-  gcs_prefix: knative-prow/logs/ci-knative-serving-gateway-api-latest
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-https
-  gcs_prefix: knative-prow/logs/ci-knative-serving-https
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-s390x-kourier-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-s390x-kourier-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-s390x-contour-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-s390x-contour-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-serving-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-serving-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-serving-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-client-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-client-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-client-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-client-tekton
-  gcs_prefix: knative-prow/logs/ci-knative-client-tekton
-  alert_stale_results_hours: 3
-- name: ci-knative-client-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-client-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-client-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-client-pkg-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-client-pkg-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-client-pkg-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-pkg-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-client-pkg-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-pkg-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-docs-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-docs-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-docs-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-docs-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-eventing-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-eventing-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-pkg-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-pkg-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-caching-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-caching-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-test-infra-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-test-infra-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-test-infra-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-test-infra-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-operator-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-operator-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-operator-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-operator-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-operator-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-operator-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-operator-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-operator-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-0.26-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-serving-0.26-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-serving-0.26-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-serving-0.26-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-0.26-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-0.26-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-eventing-0.26-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-0.26-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-serving-1.0-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.0-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-serving-1.0-s390x-kourier-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.0-s390x-kourier-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.0-s390x-contour-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.0-s390x-contour-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.0-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.0-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.0-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.0-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-client-1.0-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.0-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.0-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.0-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-eventing-1.0-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.0-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-eventing-1.0-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.0-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-1.0-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.0-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-operator-1.0-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.0-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-operator-1.0-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.0-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-operator-1.0-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.0-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.1-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.1-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-serving-1.1-s390x-kourier-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.1-s390x-kourier-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.1-s390x-contour-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.1-s390x-contour-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.1-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.1-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-client-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.1-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.1-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-eventing-1.1-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.1-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-eventing-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-1.1-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.1-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-operator-1.1-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.1-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-operator-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-operator-1.1-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.1-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.2-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.2-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-serving-1.2-s390x-kourier-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.2-s390x-kourier-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.2-s390x-contour-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.2-s390x-contour-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.2-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.2-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-client-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.2-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.2-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-eventing-1.2-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.2-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-eventing-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-1.2-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.2-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-operator-1.2-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.2-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-operator-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-operator-1.2-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.2-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.3-s390x-kourier-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.3-s390x-kourier-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-serving-1.3-s390x-contour-tests
-  gcs_prefix: knative-prow/logs/ci-knative-serving-1.3-s390x-contour-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-client-1.3-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.3-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-client-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-client-1.3-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-client-1.3-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-eventing-1.3-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.3-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-eventing-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-eventing-1.3-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.3-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-eventing-1.3-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-eventing-1.3-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-operator-1.3-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.3-continuous
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 3
-- name: ci-knative-operator-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-operator-1.3-s390x-e2e-tests
-  gcs_prefix: knative-prow/logs/ci-knative-operator-1.3-s390x-e2e-tests
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-diag-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-diag-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-event-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-event-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-event-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-event-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-event-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-event-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-event-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-event-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-func-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-func-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-func-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-func-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-func-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-func-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-migration-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-migration-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-operator-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-operator-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-sample-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-sample-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-service-log-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-service-log-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-service-log-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-service-log-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-service-log-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-service-log-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-service-log-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-service-log-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kafka-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-source-kafka-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kafka-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kamelet-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kamelet-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-source-kamelet-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kamelet-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kamelet-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kamelet-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kamelet-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kamelet-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-admin-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-admin-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-admin-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-admin-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-admin-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-admin-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-admin-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-admin-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-quickstart-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-quickstart-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-kn-plugin-quickstart-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-quickstart-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-quickstart-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-quickstart-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-quickstart-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-quickstart-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-awssqs-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-awssqs-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-awssqs-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-ceph-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-ceph-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-ceph-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-couchdb-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-couchdb-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-couchdb-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-github-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-github-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-github-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-gitlab-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-gitlab-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-gitlab-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-prometheus-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-prometheus-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-prometheus-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-redis-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-redis-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-redis-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kperf-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kperf-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-sample-controller-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-sample-controller-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-sample-controller-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-sample-controller-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-sample-controller-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-sample-controller-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-sample-source-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-sample-source-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-sample-source-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-sample-source-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-sample-source-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-sample-source-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-certmanager-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-certmanager-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-certmanager-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-certmanager-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-sandbox-net-contour-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-contour-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-contour-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-gateway-api-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-gateway-api-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-gateway-api-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-gateway-api-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-gateway-api-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-gateway-api-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-http01-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-http01-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-http01-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-http01-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-http01-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-http01-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-http01-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-http01-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-istio-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-istio-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-istio-latest
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-latest
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-istio-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-istio-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-sandbox-net-kourier-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-kourier-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-net-kourier-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-kourier-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-kourier-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-kourier-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-kourier-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-kourier-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-kourier-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-kourier-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-sandbox-async-component-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-async-component-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-async-component-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-async-component-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-async-component-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-async-component-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-async-component-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-async-component-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-async-component-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-async-component-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-sandbox-discovery-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-discovery-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-discovery-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-discovery-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-discovery-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-discovery-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-discovery-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-discovery-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-camel-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-camel-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-camel-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-camel-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-camel-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-camel-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-camel-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-camel-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-kafka-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-sandbox-eventing-kafka-broker-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-kafka-broker-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-broker-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-broker-test-coverage
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-sandbox-eventing-rabbitmq-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-rabbitmq-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-rabbitmq-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-rabbitmq-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-natss-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-natss-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-natss-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-natss-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-autoscaler-keda-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-autoscaler-keda-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-autoscaler-keda-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-autoscaler-keda-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-autoscaler-keda-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-autoscaler-keda-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-autoscaler-keda-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-autoscaler-keda-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kogito-continuous
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kogito-continuous
-  alert_stale_results_hours: 3
-- name: ci-knative-sandbox-eventing-kogito-nightly-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kogito-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kogito-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kogito-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kogito-auto-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kogito-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
num_failures_to_alert: 1 -- name: ci-knative-sandbox-container-freezer-continuous - gcs_prefix: knative-prow/logs/ci-knative-sandbox-container-freezer-continuous - alert_stale_results_hours: 3 -- name: ci-knative-sandbox-container-freezer-nightly-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-container-freezer-nightly-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 -- name: ci-knative-sandbox-container-freezer-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-container-freezer-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-container-freezer-auto-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-container-freezer-auto-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 -- name: ci-knative-sandbox-kn-plugin-source-kafka-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-awssqs-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-ceph-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-couchdb-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-github-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-gitlab-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-prometheus-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-redis-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-net-certmanager-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: 
ci-knative-sandbox-net-contour-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-net-istio-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-kafka-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-kafka-broker-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-rabbitmq-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-rabbitmq-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-natss-1.0-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-natss-1.0-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-kn-plugin-source-kafka-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-1.1-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-awssqs-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-1.1-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-ceph-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-1.1-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-couchdb-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-1.1-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-github-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-1.1-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-gitlab-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-1.1-dot-release - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - alert_stale_results_hours: 170 - num_failures_to_alert: 1 -- name: ci-knative-sandbox-eventing-prometheus-1.1-dot-release - gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-1.1-dot-release - 
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-redis-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-certmanager-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-contour-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-istio-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-broker-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-rabbitmq-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-rabbitmq-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-natss-1.1-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-natss-1.1-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kafka-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-awssqs-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-ceph-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-github-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-gitlab-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-redis-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-certmanager-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-contour-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-istio-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-broker-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-rabbitmq-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-rabbitmq-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-natss-1.2-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-natss-1.2-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-kn-plugin-source-kafka-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-kn-plugin-source-kafka-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-ceph-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-ceph-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-github-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-github-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-gitlab-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-gitlab-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-redis-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-redis-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-certmanager-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-certmanager-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-contour-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-contour-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-net-istio-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-net-istio-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-kafka-broker-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-kafka-broker-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-rabbitmq-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-rabbitmq-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-natss-1.3-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-natss-1.3-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-awssqs-0.26-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-awssqs-0.26-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-couchdb-0.26-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-0.26-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-prometheus-0.26-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-0.26-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-couchdb-0.25-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-couchdb-0.25-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-knative-sandbox-eventing-prometheus-0.25-dot-release
-  gcs_prefix: knative-prow/logs/ci-knative-sandbox-eventing-prometheus-0.25-dot-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  alert_stale_results_hours: 170
-  num_failures_to_alert: 1
-- name: ci-google-knative-gcp-nightly-release
-  gcs_prefix: knative-prow/logs/ci-google-knative-gcp-nightly-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-google-knative-gcp-auto-release
-  gcs_prefix: knative-prow/logs/ci-google-knative-gcp-auto-release
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-google-knative-gcp-test-coverage
-  gcs_prefix: knative-prow/logs/ci-google-knative-gcp-go-coverage
-  short_text_metric: "coverage"
-- name: ci-knative-cleanup
-  gcs_prefix: knative-prow/logs/ci-knative-cleanup
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-flakes-reporter
-  gcs_prefix: knative-prow/logs/ci-knative-flakes-reporter
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-flakes-resultsrecorder
-  gcs_prefix: knative-prow/logs/ci-knative-flakes-resultsrecorder
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: ci-knative-prow-jobs-syncer
-  gcs_prefix: knative-prow/logs/ci-knative-prow-jobs-syncer
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: post-knative-test-infra-image-push
-  gcs_prefix: knative-prow/logs/post-knative-test-infra-image-push
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: post-knative-sandbox-peribolos
-  gcs_prefix: knative-prow/logs/post-knative-sandbox-peribolos
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-- name: post-knative-test-infra-deploy-tools
-  gcs_prefix: knative-prow/logs/post-knative-test-infra-deploy-tools
-  alert_options:
-    alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-  num_failures_to_alert: 1
-dashboards:
-- name: serving
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-serving-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: conformance
-    test_group_name: ci-knative-serving-continuous
-    base_options: "include-filter-by-regex=test/conformance/&sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: istio-latest-mesh
-    test_group_name: ci-knative-serving-istio-latest-mesh
-    base_options: "sort-by-name="
-  - name: istio-latest-no-mesh
-    test_group_name: ci-knative-serving-istio-latest-no-mesh
-    base_options: "sort-by-name="
-  - name: istio-head-mesh
-    test_group_name: ci-knative-serving-istio-head-mesh
-    base_options: "sort-by-name="
-  - name: istio-head-no-mesh
-    test_group_name: ci-knative-serving-istio-head-no-mesh
-    base_options: "sort-by-name="
-  - name: kourier-stable
-    test_group_name: ci-knative-serving-kourier-stable
-    base_options: "sort-by-name="
-  - name: contour-latest
-    test_group_name: ci-knative-serving-contour-latest
-    base_options: "sort-by-name="
-  - name: gateway-api-latest
-    test_group_name: ci-knative-serving-gateway-api-latest
-    base_options: "sort-by-name="
-  - name: https
-    test_group_name: ci-knative-serving-https
-    base_options: "sort-by-name="
-  - name: s390x-kourier-tests
-    test_group_name: ci-knative-serving-s390x-kourier-tests
-    base_options: "sort-by-name="
-  - name: s390x-contour-tests
-    test_group_name: ci-knative-serving-s390x-contour-tests
-    base_options: "sort-by-name="
-  - name: nightly
-    test_group_name: ci-knative-serving-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-serving-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: client
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-client-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-client-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: tekton
-    test_group_name: ci-knative-client-tekton
-    base_options: "sort-by-name="
-  - name: auto-release
-    test_group_name: ci-knative-client-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: s390x-e2e-tests
-    test_group_name: ci-knative-client-s390x-e2e-tests
-    base_options: "sort-by-name="
-- name: client-pkg
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-client-pkg-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-client-pkg-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-client-pkg-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: docs
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-docs-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: coverage
-    test_group_name: ci-knative-docs-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: eventing
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-eventing-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-eventing-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-eventing-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: s390x-e2e-tests
-    test_group_name: ci-knative-eventing-s390x-e2e-tests
-    base_options: "sort-by-name="
-- name: pkg
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-pkg-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: caching
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-caching-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: test-infra
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-test-infra-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: coverage
-    test_group_name: ci-knative-test-infra-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: operator
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-operator-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-operator-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-operator-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: s390x-e2e-tests
-    test_group_name: ci-knative-operator-s390x-e2e-tests
-    base_options: "sort-by-name="
-- name: kn-plugin-diag
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-diag-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: kn-plugin-event
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-event-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-event-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-event-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-kn-plugin-event-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kn-plugin-func
-  dashboard_tab:
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-func-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-func-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-kn-plugin-func-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kn-plugin-migration
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-migration-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: kn-plugin-operator
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-operator-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: kn-plugin-sample
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-sample-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: kn-plugin-service-log
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-service-log-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-service-log-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-service-log-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-kn-plugin-service-log-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kn-plugin-source-kafka
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kn-plugin-source-kamelet
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kamelet-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kamelet-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kamelet-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-kn-plugin-source-kamelet-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kn-plugin-admin
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-admin-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-admin-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-admin-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-kn-plugin-admin-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kn-plugin-quickstart
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kn-plugin-quickstart-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-kn-plugin-quickstart-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: nightly
-    test_group_name: ci-knative-sandbox-kn-plugin-quickstart-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-kn-plugin-quickstart-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-awssqs
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-awssqs-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-awssqs-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-awssqs-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-ceph
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-ceph-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-ceph-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-ceph-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-couchdb
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-couchdb-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-couchdb-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-couchdb-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-github
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-github-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-github-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-github-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-gitlab
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-gitlab-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-gitlab-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-gitlab-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-prometheus
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-prometheus-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-prometheus-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-prometheus-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-redis
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-redis-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-redis-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-redis-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: kperf
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-kperf-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: sample-controller
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-sample-controller-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-sample-controller-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-sample-controller-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: sample-source
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-sample-source-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-sample-source-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-sample-source-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: net-certmanager
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-net-certmanager-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-net-certmanager-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-net-certmanager-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: coverage
-    test_group_name: ci-knative-sandbox-net-certmanager-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: net-contour
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-net-contour-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-net-contour-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-net-contour-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: net-gateway-api
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-net-gateway-api-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-net-gateway-api-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-net-gateway-api-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: net-http01
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-net-http01-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-net-http01-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-net-http01-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-net-http01-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: net-istio
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-net-istio-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-net-istio-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: latest
-    test_group_name: ci-knative-sandbox-net-istio-latest
-    base_options: "sort-by-name="
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-net-istio-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: coverage
-    test_group_name: ci-knative-sandbox-net-istio-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: net-kourier
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-net-kourier-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-net-kourier-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-net-kourier-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-net-kourier-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: coverage
-    test_group_name: ci-knative-sandbox-net-kourier-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: async-component
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-async-component-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-async-component-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-async-component-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-async-component-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: coverage
-    test_group_name: ci-knative-sandbox-async-component-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: discovery
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-discovery-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-discovery-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-discovery-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-discovery-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-camel
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-camel-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-camel-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-eventing-camel-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-camel-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: eventing-kafka
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-eventing-kafka-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-eventing-kafka-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-kafka-auto-release
"sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: coverage - test_group_name: ci-knative-sandbox-eventing-kafka-test-coverage - base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name=" -- name: eventing-kafka-broker - dashboard_tab: - - name: continuous - test_group_name: ci-knative-sandbox-eventing-kafka-broker-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: nightly - test_group_name: ci-knative-sandbox-eventing-kafka-broker-nightly-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: auto-release - test_group_name: ci-knative-sandbox-eventing-kafka-broker-auto-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: coverage - test_group_name: ci-knative-sandbox-eventing-kafka-broker-test-coverage - base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name=" -- name: eventing-rabbitmq - dashboard_tab: - - name: nightly - test_group_name: ci-knative-sandbox-eventing-rabbitmq-nightly-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: auto-release - test_group_name: ci-knative-sandbox-eventing-rabbitmq-auto-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 -- name: eventing-natss - dashboard_tab: - - name: nightly - test_group_name: ci-knative-sandbox-eventing-natss-nightly-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: auto-release - test_group_name: ci-knative-sandbox-eventing-natss-auto-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 -- name: eventing-autoscaler-keda - dashboard_tab: - - name: continuous - test_group_name: ci-knative-sandbox-eventing-autoscaler-keda-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: nightly - test_group_name: ci-knative-sandbox-eventing-autoscaler-keda-nightly-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: dot-release - test_group_name: ci-knative-sandbox-eventing-autoscaler-keda-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 - - name: auto-release - test_group_name: ci-knative-sandbox-eventing-autoscaler-keda-auto-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 1 -- name: eventing-kogito - dashboard_tab: - - name: continuous - test_group_name: ci-knative-sandbox-eventing-kogito-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: nightly - test_group_name: 
-    test_group_name: ci-knative-sandbox-eventing-kogito-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-eventing-kogito-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-eventing-kogito-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: container-freezer
-  dashboard_tab:
-  - name: continuous
-    test_group_name: ci-knative-sandbox-container-freezer-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: nightly
-    test_group_name: ci-knative-sandbox-container-freezer-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: dot-release
-    test_group_name: ci-knative-sandbox-container-freezer-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-knative-sandbox-container-freezer-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-- name: knative-gcp
-  dashboard_tab:
-  - name: nightly
-    test_group_name: ci-google-knative-gcp-nightly-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: auto-release
-    test_group_name: ci-google-knative-gcp-auto-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 1
-  - name: coverage
-    test_group_name: ci-google-knative-gcp-test-coverage
-    base_options: "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name="
-- name: knative-0.26
-  dashboard_tab:
-  - name: serving-continuous
-    test_group_name: ci-knative-serving-0.26-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-dot-release
-    test_group_name: ci-knative-serving-0.26-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-continuous
-    test_group_name: ci-knative-eventing-0.26-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-dot-release
-    test_group_name: ci-knative-eventing-0.26-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: knative-1.0
-  dashboard_tab:
-  - name: serving-continuous
-    test_group_name: ci-knative-serving-1.0-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-s390x-kourier-tests
-    test_group_name: ci-knative-serving-1.0-s390x-kourier-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-s390x-contour-tests
-    test_group_name: ci-knative-serving-1.0-s390x-contour-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-dot-release
-    test_group_name: ci-knative-serving-1.0-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: client-continuous
-    test_group_name: ci-knative-client-1.0-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: client-dot-release
-    test_group_name: ci-knative-client-1.0-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: client-s390x-e2e-tests
-    test_group_name: ci-knative-client-1.0-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-continuous
-    test_group_name: ci-knative-eventing-1.0-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-dot-release
-    test_group_name: ci-knative-eventing-1.0-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-s390x-e2e-tests
-    test_group_name: ci-knative-eventing-1.0-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: operator-continuous
-    test_group_name: ci-knative-operator-1.0-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: operator-dot-release
-    test_group_name: ci-knative-operator-1.0-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: operator-s390x-e2e-tests
-    test_group_name: ci-knative-operator-1.0-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: knative-1.1
-  dashboard_tab:
-  - name: serving-continuous
-    test_group_name: ci-knative-serving-1.1-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-s390x-kourier-tests
-    test_group_name: ci-knative-serving-1.1-s390x-kourier-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-s390x-contour-tests
-    test_group_name: ci-knative-serving-1.1-s390x-contour-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-dot-release
-    test_group_name: ci-knative-serving-1.1-dot-release
-    base_options: "sort-by-name="
-    alert_options:
"serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: client-continuous - test_group_name: ci-knative-client-1.1-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: client-dot-release - test_group_name: ci-knative-client-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: client-s390x-e2e-tests - test_group_name: ci-knative-client-1.1-s390x-e2e-tests - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-continuous - test_group_name: ci-knative-eventing-1.1-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-dot-release - test_group_name: ci-knative-eventing-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-s390x-e2e-tests - test_group_name: ci-knative-eventing-1.1-s390x-e2e-tests - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: operator-continuous - test_group_name: ci-knative-operator-1.1-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: operator-dot-release - test_group_name: ci-knative-operator-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: operator-s390x-e2e-tests - test_group_name: ci-knative-operator-1.1-s390x-e2e-tests - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-1.2 - dashboard_tab: - - name: serving-continuous - test_group_name: ci-knative-serving-1.2-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: serving-s390x-kourier-tests - test_group_name: ci-knative-serving-1.2-s390x-kourier-tests - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: serving-s390x-contour-tests - test_group_name: ci-knative-serving-1.2-s390x-contour-tests - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: serving-dot-release - test_group_name: ci-knative-serving-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: client-continuous - test_group_name: ci-knative-client-1.2-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: client-dot-release - test_group_name: ci-knative-client-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: client-s390x-e2e-tests - test_group_name: 
-    test_group_name: ci-knative-client-1.2-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-continuous
-    test_group_name: ci-knative-eventing-1.2-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-dot-release
-    test_group_name: ci-knative-eventing-1.2-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-s390x-e2e-tests
-    test_group_name: ci-knative-eventing-1.2-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: operator-continuous
-    test_group_name: ci-knative-operator-1.2-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: operator-dot-release
-    test_group_name: ci-knative-operator-1.2-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: operator-s390x-e2e-tests
-    test_group_name: ci-knative-operator-1.2-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-- name: knative-1.3
-  dashboard_tab:
-  - name: serving-s390x-kourier-tests
-    test_group_name: ci-knative-serving-1.3-s390x-kourier-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: serving-s390x-contour-tests
-    test_group_name: ci-knative-serving-1.3-s390x-contour-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: client-continuous
-    test_group_name: ci-knative-client-1.3-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: client-dot-release
-    test_group_name: ci-knative-client-1.3-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: client-s390x-e2e-tests
-    test_group_name: ci-knative-client-1.3-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-continuous
-    test_group_name: ci-knative-eventing-1.3-continuous
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-dot-release
-    test_group_name: ci-knative-eventing-1.3-dot-release
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-s390x-e2e-tests
-    test_group_name: ci-knative-eventing-1.3-s390x-e2e-tests
-    base_options: "sort-by-name="
-    alert_options:
-      alert_mail_to_addresses: "serverless-engprod-sea@google.com"
-    num_failures_to_alert: 3
-  - name: eventing-test-coverage
-    test_group_name: ci-knative-eventing-1.3-test-coverage
-    base_options: "sort-by-name="
-    alert_options:
alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: operator-continuous - test_group_name: ci-knative-operator-1.3-continuous - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: operator-dot-release - test_group_name: ci-knative-operator-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: operator-s390x-e2e-tests - test_group_name: ci-knative-operator-1.3-s390x-e2e-tests - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-sandbox-1.0 - dashboard_tab: - - name: kn-plugin-source-kafka-dot-release - test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-awssqs-dot-release - test_group_name: ci-knative-sandbox-eventing-awssqs-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-ceph-dot-release - test_group_name: ci-knative-sandbox-eventing-ceph-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-couchdb-dot-release - test_group_name: ci-knative-sandbox-eventing-couchdb-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-github-dot-release - test_group_name: ci-knative-sandbox-eventing-github-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-gitlab-dot-release - test_group_name: ci-knative-sandbox-eventing-gitlab-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-prometheus-dot-release - test_group_name: ci-knative-sandbox-eventing-prometheus-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-redis-dot-release - test_group_name: ci-knative-sandbox-eventing-redis-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-certmanager-dot-release - test_group_name: ci-knative-sandbox-net-certmanager-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-contour-dot-release - test_group_name: ci-knative-sandbox-net-contour-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-istio-dot-release - test_group_name: ci-knative-sandbox-net-istio-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: 
eventing-kafka-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-broker-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-broker-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-rabbitmq-dot-release - test_group_name: ci-knative-sandbox-eventing-rabbitmq-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-natss-dot-release - test_group_name: ci-knative-sandbox-eventing-natss-1.0-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-sandbox-1.1 - dashboard_tab: - - name: kn-plugin-source-kafka-dot-release - test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-awssqs-dot-release - test_group_name: ci-knative-sandbox-eventing-awssqs-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-ceph-dot-release - test_group_name: ci-knative-sandbox-eventing-ceph-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-couchdb-dot-release - test_group_name: ci-knative-sandbox-eventing-couchdb-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-github-dot-release - test_group_name: ci-knative-sandbox-eventing-github-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-gitlab-dot-release - test_group_name: ci-knative-sandbox-eventing-gitlab-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-prometheus-dot-release - test_group_name: ci-knative-sandbox-eventing-prometheus-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-redis-dot-release - test_group_name: ci-knative-sandbox-eventing-redis-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-certmanager-dot-release - test_group_name: ci-knative-sandbox-net-certmanager-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-contour-dot-release - test_group_name: ci-knative-sandbox-net-contour-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-istio-dot-release - 
test_group_name: ci-knative-sandbox-net-istio-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-broker-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-broker-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-rabbitmq-dot-release - test_group_name: ci-knative-sandbox-eventing-rabbitmq-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-natss-dot-release - test_group_name: ci-knative-sandbox-eventing-natss-1.1-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-sandbox-1.2 - dashboard_tab: - - name: kn-plugin-source-kafka-dot-release - test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-awssqs-dot-release - test_group_name: ci-knative-sandbox-eventing-awssqs-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-ceph-dot-release - test_group_name: ci-knative-sandbox-eventing-ceph-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-github-dot-release - test_group_name: ci-knative-sandbox-eventing-github-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-gitlab-dot-release - test_group_name: ci-knative-sandbox-eventing-gitlab-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-redis-dot-release - test_group_name: ci-knative-sandbox-eventing-redis-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-certmanager-dot-release - test_group_name: ci-knative-sandbox-net-certmanager-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-contour-dot-release - test_group_name: ci-knative-sandbox-net-contour-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-istio-dot-release - test_group_name: ci-knative-sandbox-net-istio-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-dot-release - test_group_name: 
ci-knative-sandbox-eventing-kafka-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-broker-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-broker-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-rabbitmq-dot-release - test_group_name: ci-knative-sandbox-eventing-rabbitmq-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-natss-dot-release - test_group_name: ci-knative-sandbox-eventing-natss-1.2-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-sandbox-1.3 - dashboard_tab: - - name: kn-plugin-source-kafka-dot-release - test_group_name: ci-knative-sandbox-kn-plugin-source-kafka-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-ceph-dot-release - test_group_name: ci-knative-sandbox-eventing-ceph-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-github-dot-release - test_group_name: ci-knative-sandbox-eventing-github-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-gitlab-dot-release - test_group_name: ci-knative-sandbox-eventing-gitlab-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-redis-dot-release - test_group_name: ci-knative-sandbox-eventing-redis-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-certmanager-dot-release - test_group_name: ci-knative-sandbox-net-certmanager-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-contour-dot-release - test_group_name: ci-knative-sandbox-net-contour-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: net-istio-dot-release - test_group_name: ci-knative-sandbox-net-istio-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-kafka-broker-dot-release - test_group_name: ci-knative-sandbox-eventing-kafka-broker-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-rabbitmq-dot-release - test_group_name: 
ci-knative-sandbox-eventing-rabbitmq-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-natss-dot-release - test_group_name: ci-knative-sandbox-eventing-natss-1.3-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-sandbox-0.26 - dashboard_tab: - - name: eventing-awssqs-dot-release - test_group_name: ci-knative-sandbox-eventing-awssqs-0.26-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-couchdb-dot-release - test_group_name: ci-knative-sandbox-eventing-couchdb-0.26-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-prometheus-dot-release - test_group_name: ci-knative-sandbox-eventing-prometheus-0.26-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: knative-sandbox-0.25 - dashboard_tab: - - name: eventing-couchdb-dot-release - test_group_name: ci-knative-sandbox-eventing-couchdb-0.25-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 - - name: eventing-prometheus-dot-release - test_group_name: ci-knative-sandbox-eventing-prometheus-0.25-dot-release - base_options: "sort-by-name=" - alert_options: - alert_mail_to_addresses: "serverless-engprod-sea@google.com" - num_failures_to_alert: 3 -- name: utilities - dashboard_tab: - - name: ci-knative-cleanup - test_group_name: ci-knative-cleanup - base_options: "" - - name: ci-knative-flakes-reporter - test_group_name: ci-knative-flakes-reporter - base_options: "" - - name: ci-knative-flakes-resultsrecorder - test_group_name: ci-knative-flakes-resultsrecorder - base_options: "" - - name: ci-knative-prow-jobs-syncer - test_group_name: ci-knative-prow-jobs-syncer - base_options: "" - - name: post-knative-test-infra-image-push - test_group_name: post-knative-test-infra-image-push - base_options: "" - - name: post-knative-sandbox-peribolos - test_group_name: post-knative-sandbox-peribolos - base_options: "" - - name: post-knative-test-infra-deploy-tools - test_group_name: post-knative-test-infra-deploy-tools - base_options: "" -dashboard_groups: -- name: knative - dashboard_names: - - "serving" - - "client" - - "client-pkg" - - "docs" - - "eventing" - - "pkg" - - "caching" - - "test-infra" - - "operator" -- name: knative-sandbox - dashboard_names: - - "kn-plugin-diag" - - "kn-plugin-event" - - "kn-plugin-func" - - "kn-plugin-migration" - - "kn-plugin-operator" - - "kn-plugin-sample" - - "kn-plugin-service-log" - - "kn-plugin-source-kafka" - - "kn-plugin-source-kamelet" - - "kn-plugin-admin" - - "kn-plugin-quickstart" - - "eventing-awssqs" - - "eventing-ceph" - - "eventing-couchdb" - - "eventing-github" - - "eventing-gitlab" - - "eventing-prometheus" - - "eventing-redis" - - "kperf" - - "sample-controller" - - "sample-source" - - "net-certmanager" - - "net-contour" - - "net-gateway-api" - - "net-http01" - - "net-istio" - - "net-kourier" - - "async-component" - - "discovery" - - "eventing-camel" - - "eventing-kafka" - - "eventing-kafka-broker" - - "eventing-rabbitmq" - - 
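Every tab block deleted above instantiates the same TestGrid tab schema, which the generator now emits instead of this hand-maintained file. A minimal sketch of that shape, with placeholder names rather than entries from the real config:

```yaml
dashboards:
- name: example-dashboard                  # hypothetical dashboard name
  dashboard_tab:
  - name: example-continuous               # tab title shown in the TestGrid UI
    test_group_name: ci-example-continuous # must match a configured test group
    base_options: "sort-by-name="          # default sort order for the grid
    alert_options:
      alert_mail_to_addresses: "oncall@example.com" # placeholder address
      num_failures_to_alert: 3             # consecutive failures before an alert email
```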
"eventing-natss" - - "eventing-autoscaler-keda" - - "eventing-kogito" - - "container-freezer" -- name: google - dashboard_names: - - "knative-gcp" -- name: maintenance - dashboard_names: - - "utilities" diff --git a/go.mod b/go.mod index 37b312700e6..53a17521881 100644 --- a/go.mod +++ b/go.mod @@ -3,30 +3,36 @@ module knative.dev/test-infra go 1.15 require ( - cloud.google.com/go v0.62.0 // indirect cloud.google.com/go/pubsub v1.6.1 - cloud.google.com/go/storage v1.10.0 + cloud.google.com/go/storage v1.12.0 github.com/blang/semver/v4 v4.0.0 github.com/davecgh/go-spew v1.1.1 github.com/go-git/go-git-fixtures/v4 v4.0.1 github.com/go-git/go-git/v5 v5.1.0 github.com/go-sql-driver/mysql v1.5.0 - github.com/google/go-cmp v0.5.1 + github.com/google/go-cmp v0.5.6 github.com/google/go-containerregistry v0.1.4 github.com/google/go-github/v32 v32.1.1-0.20201004213705-76c3c3d7c6e7 // HEAD as of Nov 6 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kelseyhightower/envconfig v1.4.0 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.0.0 - go.uber.org/atomic v1.6.0 - golang.org/x/mod v0.3.0 - golang.org/x/net v0.0.0-20201110031124-69a78807bb2b - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - google.golang.org/api v0.29.0 - google.golang.org/genproto v0.0.0-20200731012542-8145dea6a485 // indirect - google.golang.org/grpc v1.31.0 // indirect - gopkg.in/yaml.v2 v2.3.0 - k8s.io/apimachinery v0.19.7 + github.com/spf13/cobra v1.2.1 + go.uber.org/atomic v1.7.0 + golang.org/x/mod v0.4.2 + golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 + golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 + google.golang.org/api v0.44.0 + istio.io/test-infra/tools/prowgen v0.0.0-20220319022446-0c39030e6a2f + k8s.io/apimachinery v0.22.2 + k8s.io/test-infra v0.0.0-20220110151312-600d25dbe068 knative.dev/hack v0.0.0-20220224013837-e1785985d364 - sigs.k8s.io/boskos v0.0.0-20200729174948-794df80db9c9 + sigs.k8s.io/boskos v0.0.0-20210730172138-093b54882439 + sigs.k8s.io/yaml v1.3.0 +) + +replace ( + github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible + k8s.io/api => k8s.io/api v0.22.2 + k8s.io/apimachinery => k8s.io/apimachinery v0.22.2 + k8s.io/client-go => k8s.io/client-go v0.22.2 ) diff --git a/go.sum b/go.sum index cb98f6b1589..9d495ff9948 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,8 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.30.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= @@ -16,11 +18,20 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.55.0/go.mod h1:ZHmoY+/lIMNkN2+fBmuTiqZ4inFhvQad8ft7MT8IV5Y= cloud.google.com/go v0.56.0/go.mod 
diff --git a/go.sum b/go.sum
index cb98f6b1589..9d495ff9948 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,8 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.30.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
@@ -16,11 +18,20 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.55.0/go.mod h1:ZHmoY+/lIMNkN2+fBmuTiqZ4inFhvQad8ft7MT8IV5Y=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw=
-cloud.google.com/go v0.62.0 h1:RmDygqvj27Zf3fCQjQRtLyC7KwFcHkeJitcO0OoGOcA=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -29,87 +40,137 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/logging v1.0.0/go.mod h1:V1cc3ogwobYzQq5f2R7DS/GvRIrI4FKj01Gs5glwAls=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.4.0/go.mod h1:LFrqilwgdw4X2cJS9ALgzYmMu+ULyrUN6IHV3CPK4TM=
cloud.google.com/go/pubsub v1.6.1 h1:lhCQrTgu7f5SjWm5yJO0geSsPORQ2OAD+Eq1AMyBW8Y=
cloud.google.com/go/pubsub v1.6.1/go.mod h1:kvW9rcn9OLEx6eTIzMBbWbpB8YsK3vu9jxgPolVz+p4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
+cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.10.1-0.20200805182106-fcd132957b02/go.mod h1:bdhVveip9CJX75wUu7ALOTnCSKjv6PHRY0bCeBmePnw=
+cloud.google.com/go/storage v1.12.0 h1:4y3gHptW1EHVtcPAVE0eBBlFuGqEejTTG3KdIE0lUX4=
+cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho=
+code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
+contrib.go.opencensus.io/exporter/ocagent v0.6.0 h1:Z1n6UAyr0QwM284yUuh5Zd8JlvxUGAhFZcgMJkMPrGM=
+contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs=
+contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg=
contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A=
contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
contrib.go.opencensus.io/exporter/stackdriver v0.12.8/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
+contrib.go.opencensus.io/exporter/stackdriver v0.12.9-0.20191108183826-59d068f8d8ff/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.1 h1:RX9W6FelAqTVnBi/bRXJLXr9n18v4QkQwZYIdnNS51I=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.1/go.mod h1:z2tyTZtPmQ2HvWH4cOmVDgtY+1lomfKdbLnkJvZdc8c=
+contrib.go.opencensus.io/exporter/zipkin v0.1.1/go.mod h1:GMvdSl3eJ2gapOaLKzTKE3qDgUkJ86k9k3yY2eqwkzc=
contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
+github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
+github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
+github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
+github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v21.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v28.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
+github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
+github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
github.com/Azure/go-autorest/autorest v0.2.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.1.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20191203181535-308b93ad1f39/go.mod h1:yfGmCjKuUzk9WzubMlW2zwjhCraIc/J+M40cufdemRM=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
+github.com/GoogleCloudPlatform/testgrid v0.0.1-alpha.3/go.mod h1:f96W2HYy3tiBNV5zbbRc+NczwYHgG1PHXMQfoEWv680=
github.com/GoogleCloudPlatform/testgrid v0.0.7/go.mod h1:lmtHGBL0M/MLbu1tR9BWV7FGZ1FEFIdPqmJiHNCL7y8=
+github.com/GoogleCloudPlatform/testgrid v0.0.68 h1:qs3/BQpz3j3qsgnfjV8aVBfPopkGxp/TnWjjiboUVf8=
+github.com/GoogleCloudPlatform/testgrid v0.0.68/go.mod h1:SIRhudHYGiAUqMwKorBp2Kb5yJKhMq/nEMzFpYlKHVk=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU=
github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -119,45 +180,84 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
github.com/andygrunwald/go-gerrit v0.0.0-20190120104749-174420ebee6c/go.mod h1:0iuRQp6WJ44ts+iihy5E/WlPqfg5RNeQxOmzRkxCdtk=
+github.com/andygrunwald/go-gerrit v0.0.0-20210709065208-9d38b0be0268 h1:7gokoTWteZhP1t2f0OzrFFXlyL8o0+b0r4ZaRV9PXOs=
+github.com/andygrunwald/go-gerrit v0.0.0-20210709065208-9d38b0be0268/go.mod h1:aqcjwEnmLLSalFNYR0p2ttnEXOVVRctIzsUMHbEcruU=
+github.com/andygrunwald/go-jira v1.13.0/go.mod h1:jYi4kFDbRPZTJdJOVJO4mpMMIwdB+rcZwSO58DzPd2I=
+github.com/andygrunwald/go-jira v1.14.0 h1:7GT/3qhar2dGJ0kq8w0d63liNyHOnxZsUZ9Pe4+AKBI=
+github.com/andygrunwald/go-jira v1.14.0/go.mod h1:KMo2f4DgMZA1C9FdImuLc04x4WQhn5derQpnsuBFgqE=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ=
+github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs=
+github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
+github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
+github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/aws/aws-k8s-tester v0.0.0-20190114231546-b411acf57dfe/go.mod h1:1ADF5tAtU1/mVtfMcHAYSm2fPw71DA7fFk0yed64/0I=
+github.com/aws/aws-k8s-tester v0.9.3/go.mod h1:nsh1f7joi8ZI1lvR+Ron6kJM2QdCYPU/vFePghSSuTc=
github.com/aws/aws-k8s-tester v1.0.0/go.mod h1:NUNd9k43+h9O5tvwL+4N1Ctb//SapmeeFX1G0/2/0Qc=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
+github.com/aws/aws-sdk-go v1.16.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.23.22/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.32/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
+github.com/aws/aws-sdk-go v1.29.34/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/aws/aws-sdk-go v1.30.4/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.30.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.30.16/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.31.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.37.22 h1:cyZp8TvUbH9rrShdrwULtCj4pB5szddrw9aKHUsw1Ic=
+github.com/aws/aws-sdk-go v1.37.22/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
+github.com/bazelbuild/buildtools v0.0.0-20200922170545-10384511ce98/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
@@ -165,25 +265,49 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
+github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U=
+github.com/bombsimon/wsl/v2 v2.2.0/go.mod h1:Azh8c3XGEJl9LyX0/sFC+CKMc7Ssgua0g+6abzXN4Pg=
+github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/bwmarrin/snowflake v0.0.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE=
+github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
+github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
+github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
+github.com/clarketm/json v1.13.4 h1:0JketcMdLC16WGnRGJiNmTXuQznDEQaiknxSPRBxg+k=
github.com/clarketm/json v1.13.4/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudevents/sdk-go v0.0.0-20190509003705-56931988abe3/go.mod h1:j1nZWMLGg3om8SswStBoY6/SHvcLM19MuZqwDtMtmzs=
+github.com/cloudevents/sdk-go v1.0.0 h1:gS5I0s2qPmdc4GBPlUmzZU7RH30BaiOdcRJ1RkXnPrc=
github.com/cloudevents/sdk-go v1.0.0/go.mod h1:3TkmM0cFqkhCHOq5JzzRU/RxRkwzoS8TZ+G448qVTog=
+github.com/cloudevents/sdk-go/v2 v2.0.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU=
+github.com/cloudevents/sdk-go/v2 v2.1.0 h1:bmgrU8k+K2ppZ+G/q5xEQx/Xk9HRtJmkrEO3qtDO2k0=
+github.com/cloudevents/sdk-go/v2 v2.1.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -196,14 +320,21 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@@ -211,31 +342,46 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=
+github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
+github.com/denisenkom/go-mssqldb v0.0.0-20190111225525-2fea367d496d/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denormal/go-gitignore v0.0.0-20180930084346-ae8ad1d07817 h1:0nsrg//Dc7xC74H/TZ5sYR8uk4UQRNjsw8zejqH5a4Q=
+github.com/denormal/go-gitignore v0.0.0-20180930084346-ae8ad1d07817/go.mod h1:C/+sI4IFnEpCn6VQ3GIPEp+FrQnQw+YQP3+n+GdGq7o=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU=
+github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
+github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492 h1:FwssHbCDJD025h+BchanCwE1Q8fyMgqDr2mOQAWOLGw=
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v0.0.0-20200210162036-a4bedce16568 h1:AbI1uj9w4yt6TvfKHfRu7G55KuQe7NCvWPQRKDoXggE=
+github.com/docker/cli v0.0.0-20200210162036-a4bedce16568/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
+github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
@@ -243,7 +389,6 @@ github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
@@ -253,7 +398,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -263,33 +407,59 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
+github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/felixge/fgprof v0.9.1 h1:E6FUJ2Mlv043ipLOCFqo8+cHo9MhQ203E2cdEK/isEs=
+github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.8.1/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsouza/fake-gcs-server v0.0.0-20180612165233-e85be23bdaa8/go.mod h1:1/HufuJ+eaDf4KTnYdS6HJMGvMRU8d4cYTuu/1QaBbI=
+github.com/fsouza/fake-gcs-server v1.19.4 h1:3bRRh/rQnB2XbrMolHAj9oX/PFiWVQFVVfPR5y2pxb8=
+github.com/fsouza/fake-gcs-server v1.19.4/go.mod h1:I0/88nHCASqJJ5M7zVF0zKODkYTcuXFW5J5yajsNJnE=
+github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE=
+github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
+github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g=
+github.com/go-critic/go-critic v0.4.3/go.mod h1:j4O3D4RoIwRqlZw5jJpx0BNfXWWbpcJoKu5cYSe4YmQ=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM=
@@ -302,47 +472,66 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-ini/ini v1.46.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.55.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logfmt/logfmt v0.3.0/go.mod
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.17.2/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.17.2/go.mod h1:QO936ZXeisByFmZEO1IS1Dqhtf4QV1sYYFtIq6Ld86Q= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -350,23 +539,52 @@ github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+Z github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-sql-driver/mysql v0.0.0-20160411075031-7ebe0a500653/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= 
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -374,6 +592,10 @@ github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -382,14 +604,18 @@ github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -403,45 +629,96 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 
h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.23.7/go.mod h1:g/38bxfhp4rI7zeWSxcdIeHTQGS58TCak8FYcyCmavQ= +github.com/golangci/golangci-lint v1.27.0/go.mod h1:+eZALfxIuthdrHPtfM7w/R3POJLjHDfJJw8XZl9xOng= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gomodule/redigo v1.7.0/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc= +github.com/gomodule/redigo v1.8.5/go.mod 
h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE= github.com/google/go-containerregistry v0.0.0-20200115214256-379933c9c22b/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs= +github.com/google/go-containerregistry v0.0.0-20200123184029-53ce695e4179/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs= +github.com/google/go-containerregistry v0.0.0-20200331213917-3d03ed9b1ca2/go.mod h1:pD1UFYs7MCAx+ZLShBdttcaOSbyc8F9Na/9IZLNwJeA= +github.com/google/go-containerregistry v0.1.1/go.mod h1:npTSyywOeILcgWqd+rvtzGWflIPPcBQhYoOONaY4ltM= github.com/google/go-containerregistry v0.1.4 h1:fZm+V2pYnvb8NMPM1YOsyxr31XKfpHTun5oVTRnG8qc= github.com/google/go-containerregistry v0.1.4/go.mod h1:6EGiuQp36pL82lX6rFN0s9AJOVL0Mlgx/DAsYZW5X3s= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0= +github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-github/v29 v29.0.3/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= github.com/google/go-github/v32 v32.1.1-0.20201004213705-76c3c3d7c6e7 h1:W2zu67hbmXqUmeHhUQ6mHuWh5zOOuPBizXjvLX4OL9c= github.com/google/go-github/v32 v32.1.1-0.20201004213705-76c3c3d7c6e7/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-licenses v0.0.0-20191112164736-212ea350c932/go.mod h1:16wa6pRqNDUIhOtwF0GcROVqMeXHZJ7H6eGDFUh5Pfk= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-licenses 
v0.0.0-20200227160636-0fa8c766a591/go.mod h1:JWeTIGPLQ9gF618ZOdlUitd1gRR/l99WOkHOlmR/UVA= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic= github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk= github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea h1:VcIYpAGBae3Z6BVncE0OnTE/ZjlDXqtYhOZky88neLM= +github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -449,13 +726,28 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= +github.com/google/wire v0.4.0 h1:kXcsA/rIGzJImVqPdhfnr6q0xsS9gU0515q1EPpJ9fE= +github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -465,171 +757,316 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 
h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/goreleaser/goreleaser v0.136.0/go.mod h1:wiKrPUeSNh6Wu8nUHxZydSOVQ/OZvOaO7DTtFqie904= +github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= +github.com/goreleaser/nfpm v1.3.0/go.mod h1:w0p7Kc9TAUgWMyrub63ex3M2Mgw88M4GZXoTq5UCb40= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/csrf v1.6.2/go.mod h1:7tSf8kmjNYr7IWDCYhd3U8Ck34iQ/Yw5CJu7bAkHEGI= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= 
+github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.12.2/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/h2non/gock v1.0.9/go.mod h1:CZMcB0Lg5IWnr9bF79pPMg9WeV6WumxQiUJ1UvdO1iE= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v0.0.0-20161215172503-049f9b42e9a5/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jenkins-x/go-scm v1.5.65/go.mod h1:MgGRkJScE/rJ30J/bXYqduN5sDPZqZFITJopsnZmTOw= github.com/jenkins-x/go-scm v1.5.79/go.mod h1:PCT338UhP/pQ0IeEeMEf/hoLTYKcH7qjGEKd7jPkeYg= +github.com/jenkins-x/go-scm v1.5.117/go.mod h1:PCT338UhP/pQ0IeEeMEf/hoLTYKcH7qjGEKd7jPkeYg= 
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= +github.com/jinzhu/gorm v0.0.0-20170316141641-572d0a0ab1eb/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v0.0.0-20190603042836-f5c5f50e6090/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go 
v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.2/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knative/build v0.1.2/go.mod h1:/sU74ZQkwlYA5FwYDJhYTy61i/Kn+5eWfln2jDbw3Qo= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac h1:+2b6iGRJe3hvV/yVXrd41yVEjxuFHxasJqDhkIjS4gk= +github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.1-0.20191009090205-6c0755d89d1e/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= github.com/mattn/go-colorable v0.0.9/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v0.0.0-20160514122348-38ee283dabf1/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/go-zglob v0.0.2 h1:0qT24o2wsZ8cOXQAERwBX6s+rPMs/bJTKxLVVtgfDXc= +github.com/mattn/go-zglob v0.0.2/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v3 v3.3.0/go.mod h1:YnQtqsp+94Rwd0D/rk5cnLrxusUBUXg+08Ebtr1Mqao= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e/go.mod h1:waEya8ee1Ro/lgxpVhkJI4BVASzkm3UZqkx/cFJiYHM= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ= +github.com/nats-io/go-nats v1.7.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/octago/sflags v0.2.0/go.mod h1:G0bjdxh4qPRycF74a2B8pU36iTp9QHGx0w0dFZXPt80= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -638,34 +1075,58 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.0/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2 h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb9NAWI= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0/go.mod 
h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -677,11 +1138,15 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -689,13 +1154,21 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.5.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -704,7 +1177,12 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -714,60 +1192,115 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1:CGFX09Ci3pq9QZdj86B+VGIdNj4VyCo2iPOGS9esB/k= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190706150252-9beb055b7962/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= +github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= +github.com/satori/go.uuid v0.0.0-20160713180306-0aa62d5ddceb/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= +github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= +github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= 
+github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/githubv4 v0.0.0-20180925043049-51d7b505e2e9/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/githubv4 v0.0.0-20191102174205-af46314aec7b/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= +github.com/shurcooL/githubv4 v0.0.0-20210725200734-83ba7b4c9228 h1:N5B+JgvM/DVYIxreItPJMM3yWrNO/GB2q4nESrtBisM= +github.com/shurcooL/githubv4 v0.0.0-20210725200734-83ba7b4c9228/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/graphql v0.0.0-20180924043259-e4a3a37e6d42/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= +github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk= github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/soheilhy/cmux v0.1.3/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.0-20180319062004-c439c4fa0937/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -775,27 +1308,66 @@ github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRci github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tektoncd/pipeline v0.8.0/go.mod h1:IZzJdiX9EqEMuUcgdnElozdYYRh0/ZRC+NKMLj1K3Yw= +github.com/tektoncd/pipeline v0.10.1/go.mod h1:D2X0exT46zYx95BU7ByM8+erpjoN7thmUBvlKThOszU= github.com/tektoncd/pipeline v0.11.0/go.mod h1:hlkH32S92+/UODROH0dmxzyuMxfRFp/Nc3e29MewLn8= +github.com/tektoncd/pipeline v0.13.1-0.20200625065359-44f22a067b75/go.mod h1:R5AlT46x/F8n/pFJFjZ1U1q71GWtVXgG7RZkkoRL554= +github.com/tektoncd/pipeline v0.14.1-0.20200710073957-5eeb17f81999 h1:l9GiqDfZrUMJovJqcSID2oqZmNgD2feeT/HGAzQ0yBw= +github.com/tektoncd/pipeline v0.14.1-0.20200710073957-5eeb17f81999/go.mod h1:1DqbPVNCquEdAjVps1Fbtht6R+P7+DuWbfQRJQprjiU= +github.com/tektoncd/plumbing v0.0.0-20191216083742-847dcf196de9/go.mod h1:QZHgU07PRBTRF6N57w4+ApRu8OgfYLFNqCDlfEZaD9Y= github.com/tektoncd/plumbing v0.0.0-20200217163359-cd0db6e567d2/go.mod h1:QZHgU07PRBTRF6N57w4+ApRu8OgfYLFNqCDlfEZaD9Y= +github.com/tektoncd/plumbing v0.0.0-20200430135134-e53521e1d887/go.mod h1:cZPJIeTIoP7UPTxQyTQLs7VE1TiXJSNj0te+If4Q+jI= github.com/tektoncd/plumbing/pipelinerun-logs v0.0.0-20191206114338-712d544c2c21/go.mod h1:S62EUWtqmejjJgUMOGB1CCCHRp6C706laH06BoALkzU= +github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= +github.com/tetafro/godot v0.4.2/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod 
h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/trivago/tgo v1.0.1/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc= +github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM= +github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc= +github.com/tsenart/vegeta v12.7.1-0.20190725001342-b5f4fca92137+incompatible/go.mod h1:Smz/ZWfhKRcyDDChZkG3CyTHdj87lHzio/HOCkbndXM= +github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vdemeester/k8s-pkg-credentialprovider v0.0.0-20200107171650-7c61ffa44238/go.mod h1:JwQJCMWpUDqjZrB5jpw0f5VbN7U95zxFy1ZDpoEarGo= github.com/vdemeester/k8s-pkg-credentialprovider v1.13.12-1/go.mod h1:Fko0rTxEtDW2kju5Ky7yFJNS3IcNvW8IPsp4/e9oev0= +github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= 
+github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= +github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= @@ -805,6 +1377,7 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -812,16 +1385,30 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.1-etcd.7/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20181031231232-83304cfc808c/go.mod h1:weASp41xM3dk0YHg1s/W8ecdGP5G4teSTMBPpYAaUgA= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= 
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= +go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -829,25 +1416,60 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= 
+go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.9.2-0.20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go4.org v0.0.0-20201209231011-d4a079459e60 h1:iqAGo78tVOJXELHQFRjR6TMwItrvXH4hrGJ32I/NFF8= +go4.org v0.0.0-20201209231011-d4a079459e60/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +gocloud.dev v0.19.0 h1:EDRyaRAnMGSq/QBto486gWFxMLczAfIYUmusV7XLNBM= gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -855,6 +1477,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -862,15 +1485,21 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -887,6 +1516,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -896,8 +1526,10 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= @@ -906,13 +1538,20 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -931,10 +1570,15 @@ golang.org/x/net 
[go.sum hunks: auto-generated checksum updates accompanying this dependency bump; per the hunk headers the file grows from roughly 1,360 to roughly 2,300 lines. The new pins added in this span include:

- golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
- golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2
- golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d
- golang.org/x/text v0.3.6
- golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
- golang.org/x/tools v0.1.5
- gomodules.xyz/jsonpatch/v2 v2.2.0
- google.golang.org/api v0.44.0
- google.golang.org/appengine v1.6.7
- google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c
- google.golang.org/grpc v1.38.0
- google.golang.org/protobuf v1.26.0
- gopkg.in/ini.v1 v1.62.0
- gopkg.in/yaml.v2 v2.4.0
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
- istio.io/test-infra/tools/prowgen v0.0.0-20220319022446-0c39030e6a2f (newly added dependency)
- k8s.io/api, k8s.io/apiextensions-apiserver, k8s.io/apimachinery, k8s.io/client-go, k8s.io/component-base v0.22.2
- k8s.io/klog/v2 v2.9.0
- k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e
- k8s.io/test-infra v0.0.0-20220110151312-600d25dbe068
- k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
- knative.dev/pkg v0.0.0-20200711004937-22502028e31a
- sigs.k8s.io/boskos v0.0.0-20210730172138-093b54882439
- sigs.k8s.io/controller-runtime v0.10.3
- sigs.k8s.io/structured-merge-diff/v4 v4.1.2
- sigs.k8s.io/yaml v1.3.0]
v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/guides/prow_knative_setup.md b/guides/prow_knative_setup.md index 50df963b273..2796afa7802 100644 --- a/guides/prow_knative_setup.md +++ b/guides/prow_knative_setup.md @@ -31,19 +31,18 @@ All Prow config files for running Prow jobs for Knative projects are under 1. Make sure that _Knative Prow Robots_ team is an Admin of the repo. 1. Add the new repo to - [config_knative.yaml](../prow/config_knative.yaml), the meta + [jobs_config](../prow/jobs_config), the meta config file for generating Prow config and Prow job config. Check the top-level section `presubmits:` and `periodics:` for blueprints for what to add. Then run `./hack/generate-configs.sh` to regenerate - [prow/jobs/config.yaml](../prow/jobs/config.yaml) and - [prow/core](../prow/core), otherwise the presubmit + [prow/jobs/generated](../prow/jobs/generated), otherwise the presubmit test in test-infra will fail. Create a PR with the changes. Once it's merged the configs will be automatically updated by a postsubmit job. -2. Wait a few minutes, check that Prow is working by entering `/woof` as a +1. Wait a few minutes, check that Prow is working by entering `/woof` as a comment in any PR in the new repo. -3. Set **tide** as a required status check for the default branch. +1. Set **tide** as a required status check for the default branch. ![Branch Checks](branch_checks.png) @@ -53,7 +52,7 @@ All Prow config files for running Prow jobs for Knative projects are under `//test/presubmit-tests.sh` working, and optionally `//hack/release.sh` working for automated nightly releases). -2. Update [config_knative.yaml](../prow/config_knative.yaml) +1. Update [jobs_config](../prow/jobs_config) (usually, copy and update the existing configuration from another repository). Run `./hack/generate-configs.sh` to regenerate [prow/jobs/config.yaml](../prow/jobs/config.yaml), @@ -61,10 +60,10 @@ All Prow config files for running Prow jobs for Knative projects are under changes. Once it's merged the configs will be automatically updated by a postsubmit job. -3. Wait a few minutes, enter `/test [prow_job_name]` or `/test all` or `/retest` +1. Wait a few minutes, enter `/test [prow_job_name]` or `/test all` or `/retest` as a comment in any PR in the repo and ensure the test jobs are executed. -4. Optionally, set the new test jobs as required status checks for the default +1. Optionally, set the new test jobs as required status checks for the default branch. ![Branch Checks](branch_checks.png) diff --git a/guides/release_setup.md b/guides/release_setup.md index fa4bd00d9af..06f436a09a4 100644 --- a/guides/release_setup.md +++ b/guides/release_setup.md @@ -52,22 +52,11 @@ Versioned releases can be one of two kinds: [helper script documentation](https://github.com/knative/hack/README.md#using-the-releasesh-helper-script). 1. Enable `nightly`, `auto-release` and `dot-release` jobs for your repo in the - [config_knative.yaml](../prow/config_knative.yaml) file. For - example: + [jobs_config](../prow/jobs_config) file. - ``` - knative/MODULE: - - nightly: true - - dot-release: true - - auto-release: true - ``` - -2. Run `./hack/generate-configs.sh` to regenerate +1. Run `./hack/generate-configs.sh` to regenerate [config.yaml](../prow/jobs/config.yaml), otherwise the presubmit - test will fail. 
Merge such pull request and ask the - [oncall](https://knative.github.io/test-infra/) to update the Prow cluster - and TestGrid with the new configs, by running `make update-prow-job-config` - and `make update-testgrid-config` in `config/prow`. Within two hours the + test will fail. Merge the pull request; within two hours the 3 new jobs (nightly, auto-release and dot-release) will appear on TestGrid. The jobs can also be found in the @@ -86,19 +75,19 @@ Versioned releases can be one of two kinds: ### Starting the release from the Git CLI -1. Fetch the upstream remote. +1. Fetch the upstream remote. ```sh git fetch upstream ``` -1. Create a `release-X.Y` branch from `upstream/main`. +1. Create a `release-X.Y` branch from `upstream/main`. ```sh git branch --no-track release-X.Y upstream/main ``` -1. Push the branch to upstream. +1. Push the branch to upstream. ```sh git push upstream release-X.Y @@ -118,34 +107,34 @@ Write release notes and add them to the release at ## Adding a commit to the next minor version release -1. Fetch the upstream remote. +1. Fetch the upstream remote. ```sh git fetch upstream ``` -1. Create a branch based on the desired (usually the latest) `release-X.Y` +1. Create a branch based on the desired (usually the latest) `release-X.Y` branch. ```sh git checkout -b my-backport-branch upstream/release-X.Y ``` -1. Cherry-pick desired commits from main into the new branch. +1. Cherry-pick desired commits from main into the new branch. ```sh git cherry-pick ``` -1. Push the branch to your fork. +1. Push the branch to your fork. ```sh git push origin ``` -1. Create a PR for your branch based on the `release-X.Y` branch. +1. Create a PR for your branch based on the `release-X.Y` branch. -1. Once the PR is merged, it will be included in the next minor release, which +1. Once the PR is merged, it will be included in the next minor release, which is usually built Tuesday nights, between 2AM and 3AM. **Note**: If a minor release is required for a release branch that's not the diff --git a/hack/generate-configs.sh b/hack/generate-configs.sh index 524bf341a27..9c0768937ce 100755 --- a/hack/generate-configs.sh +++ b/hack/generate-configs.sh @@ -19,17 +19,14 @@ set -Eeuo pipefail REPO_ROOT_DIR="$(dirname "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")" # Generate Prow configs since we are using generator -readonly CONFIG_GENERATOR_DIR="${REPO_ROOT_DIR}/tools/config-generator" +readonly CONFIG_GENERATOR_DIR="${REPO_ROOT_DIR}/tools/configgen" -# Generate config for production Prow +# Clean up existing generated config files.
+rm -rf "${REPO_ROOT_DIR}/prow/jobs/generated/*" + +# Generate config for Prow jobs and TestGrid go run "${CONFIG_GENERATOR_DIR}" \ - --gcs-bucket="knative-prow" \ - --generate-testgrid-config=true \ - --generate-k8s-testgrid-config=true \ - --image-docker=gcr.io/knative-tests/test-infra \ - --prow-host=https://prow.knative.dev \ - --testgrid-gcs-bucket="knative-testgrid" \ - --prow-jobs-config-output="${REPO_ROOT_DIR}/prow/jobs/config.yaml" \ - --testgrid-config-output="${REPO_ROOT_DIR}/config/prow/testgrid/testgrid.yaml" \ - --k8s-testgrid-config-output="${REPO_ROOT_DIR}/config/prow/k8s-testgrid/k8s-testgrid.yaml" \ - "${REPO_ROOT_DIR}/prow/config_knative.yaml" + --prow-jobs-config-input="${REPO_ROOT_DIR}/prow/jobs_config" \ + --prow-jobs-config-output="${REPO_ROOT_DIR}/prow/jobs/generated" \ + --all-prow-jobs-config="${REPO_ROOT_DIR}/prow/jobs" \ + --testgrid-config-output="${REPO_ROOT_DIR}/config/prow/k8s-testgrid/k8s-testgrid.yaml" diff --git a/pkg/clustermanager/perf-tests/pkg/benchmark.go b/pkg/clustermanager/perf-tests/pkg/benchmark.go index 221e185845b..3398d404d39 100644 --- a/pkg/clustermanager/perf-tests/pkg/benchmark.go +++ b/pkg/clustermanager/perf-tests/pkg/benchmark.go @@ -24,7 +24,7 @@ import ( "path/filepath" "strings" - yaml "gopkg.in/yaml.v2" + "sigs.k8s.io/yaml" ) const ( @@ -45,15 +45,15 @@ var backupLocations = []string{"us-west1", "us-west2", "us-east1"} // GKECluster saves the config information for the GKE cluster type GKECluster struct { - Config ClusterConfig `yaml:"GKECluster,omitempty"` + Config ClusterConfig `json:"GKECluster,omitempty"` } // ClusterConfig is config for the cluster type ClusterConfig struct { - Location string `yaml:"location,omitempty"` - NodeCount int64 `yaml:"nodeCount,omitempty"` - NodeType string `yaml:"nodeType,omitempty"` - Addons string `yaml:"addons,omitempty"` + Location string `json:"location,omitempty"` + NodeCount int64 `json:"nodeCount,omitempty"` + NodeType string `json:"nodeType,omitempty"` + Addons string `json:"addons,omitempty"` } // benchmarkNames returns names of the benchmarks. diff --git a/pkg/helpers/dir.go b/pkg/helpers/dir.go index e6df61121d8..14a58efa638 100644 --- a/pkg/helpers/dir.go +++ b/pkg/helpers/dir.go @@ -18,6 +18,7 @@ package helpers import ( "fmt" + "log" "os" "os/exec" "strings" @@ -52,6 +53,16 @@ func GetRootDir() (string, error) { return strings.TrimSpace(string(output)), nil } +// MustGetRootDir gets directory of git root. +// Fatal if it fails. 
+func MustGetRootDir() string { + root, err := GetRootDir() + if err != nil { + log.Fatalf("Error getting root dir: %v", err) + } + return root +} + // ChdirToRoot changes directory to the git root dir func ChdirToRoot() error { d, err := GetRootDir() diff --git a/pkg/testgrid/testgrid.go b/pkg/testgrid/testgrid.go index 11d8c67a487..bc8bbc2c1b1 100644 --- a/pkg/testgrid/testgrid.go +++ b/pkg/testgrid/testgrid.go @@ -27,12 +27,12 @@ const ( // jobNameTestgridURLMap contains a hard-coded mapping of job name to Testgrid tab URL relative to base URL var jobNameTestgridURLMap = map[string]string{ - "ci-knative-serving-continuous": "serving#continuous", - "ci-knative-serving-istio-latest-mesh": "serving#istio-latest-mesh", - "ci-knative-serving-istio-latest-no-mesh": "serving#istio-latest-no-mesh", - "ci-knative-serving-kourier-stable": "serving#kourier-stable", - "ci-knative-serving-contour-latest": "serving#contour-latest", - "ci-knative-serving-gateway-api-latest": "serving#gateway-api-latest", + "continuous_serving_main_periodic": "serving#continuous", + "istio-latest-mesh-serving_main_periodic": "serving#istio-latest-mesh", + "istio-latest-no-mesh-serving_main_periodic": "serving#istio-latest-no-mesh", + "kourier-stable-serving_main_periodic": "serving#kourier-stable", + "contour-latest-serving_main_periodic": "serving#contour-latest", + "gateway-api-latest-serving_main_periodic": "serving#gateway-api-latest", } // GetTestgridTabURL gets the Testgrid URL for a given job and filters for Testgrid diff --git a/pkg/testgrid/yaml.go b/pkg/testgrid/yaml.go index 8cb2678b51c..bc370beafbf 100644 --- a/pkg/testgrid/yaml.go +++ b/pkg/testgrid/yaml.go @@ -19,39 +19,35 @@ package testgrid import ( "fmt" "io/ioutil" - "path" - "gopkg.in/yaml.v2" - - "knative.dev/test-infra/pkg/helpers" + "sigs.k8s.io/yaml" ) -const configPath = "config/prow/testgrid/testgrid.yaml" - // Config is the entire testgrid config type Config struct { - Dashboards []Dashboard `yaml:"dashboards"` + Dashboards []Dashboard `json:"dashboards"` + DashboardGroups []DashboardGroup `json:"dashboard_groups"` } -// Dashboard is single dashboard on testgrid -type Dashboard struct { - Name string `yaml:"name"` - Tabs []Tab `yaml:"dashboard_tab"` +// DashboardGroup is a group of dashboards on testgrid +type DashboardGroup struct { + // The name for the dashboard group. + Name string `json:"name"` + // A list of names specifying dashboards to show links to in a separate tabbed + // bar at the top of the page for each of the given dashboards.
+ DashboardNames []string `json:"dashboard_names"` } -// Tab is a single tab on testgrid -type Tab struct { - Name string `yaml:"name"` - TestGroupName string `yaml:"test_group_name"` +// Dashboard is a single dashboard on testgrid +type Dashboard struct { + Name string `json:"name"` + DashboardTab []*DashboardTab `json:"dashboard_tab,omitempty"` } -// NewConfig loads from default config -func NewConfig() (*Config, error) { - root, err := helpers.GetRootDir() - if err != nil { - return nil, err - } - return NewConfigFromFile(path.Join(root, configPath)) +// DashboardTab is a single tab on testgrid +type DashboardTab struct { + Name string `json:"name"` + TestGroupName string `json:"test_group_name"` } // NewConfigFromFile loads config from file @@ -71,7 +67,7 @@ func NewConfigFromFile(fp string) (*Config, error) { // (generally this is prow job name) func (ac *Config) GetTabRelURL(tgName string) (string, error) { for _, dashboard := range ac.Dashboards { - for _, tab := range dashboard.Tabs { + for _, tab := range dashboard.DashboardTab { if tab.TestGroupName == tgName { return fmt.Sprintf("%s#%s", dashboard.Name, tab.Name), nil } diff --git a/pkg/testgrid/yaml_test.go b/pkg/testgrid/yaml_test.go deleted file mode 100644 index dc815274460..00000000000 --- a/pkg/testgrid/yaml_test.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testgrid - -import ( - "testing" -) - -func TestConfigPath(t *testing.T) { - if _, err := NewConfig(); err != nil { - t.Fatalf("Testing default config file, want: no err, got: %v", err) - } -} - -func TestTabName(t *testing.T) { - ac, _ := NewConfig() - for tgName, URL := range jobNameTestgridURLMap { - if got, _ := ac.GetTabRelURL(tgName); got != URL { - t.Fatalf("Testing testgroup/tab mapping for '%s', want: '%s', got: '%s'", tgName, URL, got) - } - } -} diff --git a/prow/README.md b/prow/README.md index a77ec11952a..427193d8108 100644 --- a/prow/README.md +++ b/prow/README.md @@ -1,3 +1,5 @@ +# README + ## Knative prow This directory contains prow configs hosted for Knative. This prow is bumped by knative-autobump-config.yaml which uses [Kubernetes generic-autobumper](https://github.com/kubernetes/test-infra/tree/master/prow/cmd/generic-autobumper). @@ -5,8 +7,8 @@ This directory contains prow configs hosted for Knative. This prow is bumped by - `Makefile` Commands to interact with the Prow instance regarding configs and updates. - `cluster/*.yaml` Deployments of the Prow cluster. -- `jobs/config.yaml` Generated configuration of the Prow jobs. -- `config_knative.yaml` Input configuration for `config-generator` tool. +- `jobs/generated` Generated configuration of the Prow jobs. +- `jobs_config` Input configuration for the `configgen` tool. - `jobs/run_job.sh` Convenience script to start a Prow job from command-line. - `jobs/pj-on-kind.sh` Convenience script to start a Prow job on kind from command-line.
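The benchmark.go and yaml.go hunks above swap `gopkg.in/yaml.v2` for `sigs.k8s.io/yaml`, which round-trips YAML through JSON and therefore reads `json:` struct tags instead of `yaml:` ones; that is why every tag in those structs changes. A minimal standalone sketch of the behavior (the `ClusterConfig` shape mirrors the diff; the literal YAML input is illustrative only):

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/yaml"
)

// ClusterConfig mirrors the struct updated in pkg/clustermanager/perf-tests/pkg/benchmark.go.
// sigs.k8s.io/yaml converts YAML to JSON before unmarshaling, so the fields
// carry `json:` tags rather than the `yaml:` tags used by gopkg.in/yaml.v2.
type ClusterConfig struct {
	Location  string `json:"location,omitempty"`
	NodeCount int64  `json:"nodeCount,omitempty"`
	NodeType  string `json:"nodeType,omitempty"`
	Addons    string `json:"addons,omitempty"`
}

func main() {
	// Illustrative input only; any YAML document with these keys would do.
	in := []byte("location: us-west1\nnodeCount: 4\nnodeType: e2-standard-8\n")

	var cfg ClusterConfig
	if err := yaml.Unmarshal(in, &cfg); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Printf("%+v\n", cfg) // {Location:us-west1 NodeCount:4 NodeType:e2-standard-8 Addons:}
}
```

Keeping `yaml:` tags after the import switch would silently leave every field empty, which is the failure mode the tag rewrite avoids.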
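Similarly, the reworked `pkg/testgrid` API drops the `NewConfig()` default loader, so callers now pass an explicit path to `NewConfigFromFile`. A hedged usage sketch combining it with the new `helpers.MustGetRootDir`; the config path and job name below are assumptions taken from other hunks in this diff, and the lookup only succeeds if the file actually contains matching `dashboard_tab` entries:

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"

	"knative.dev/test-infra/pkg/helpers"
	"knative.dev/test-infra/pkg/testgrid"
)

func main() {
	// MustGetRootDir aborts the process if the git root cannot be resolved,
	// so no error handling is needed at the call site.
	root := helpers.MustGetRootDir()

	// Assumed path: the generated k8s-testgrid config written by
	// hack/generate-configs.sh; adjust if your layout differs.
	cfg, err := testgrid.NewConfigFromFile(
		filepath.Join(root, "config/prow/k8s-testgrid/k8s-testgrid.yaml"))
	if err != nil {
		log.Fatalf("loading testgrid config: %v", err)
	}

	// Resolve a dashboard#tab fragment for a test group (Prow job) name;
	// the name is taken from jobNameTestgridURLMap in pkg/testgrid/testgrid.go.
	rel, err := cfg.GetTabRelURL("continuous_serving_main_periodic")
	if err != nil {
		log.Fatalf("resolving tab URL: %v", err)
	}
	fmt.Println(rel) // expected: "serving#continuous"
}
```

Making the path explicit is what allowed the diff to delete both the hard-coded `configPath` constant and the `yaml_test.go` tests that depended on it.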
diff --git a/prow/config_knative.yaml b/prow/config_knative.yaml deleted file mode 100644 index 4ca44831d4a..00000000000 --- a/prow/config_knative.yaml +++ /dev/null @@ -1,1739 +0,0 @@ -presubmits: - knative/serving: - - repo-settings: null - - build-tests: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - unit-tests: true - needs-monitor: true - - integration-tests: false - - custom-test: upgrade-tests - needs-monitor: true - args: - - --run-test - - ./test/e2e-upgrade-tests.sh - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - go-coverage: false - - custom-test: performance-tests-kperf - needs-monitor: true - always-run: false - optional: true - args: - - --run-test - - ./test/performance/performance-tests.sh - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: istio-latest-mesh - needs-monitor: true - always-run: false - optional: true - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest --mesh - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: istio-latest-mesh-short - needs-monitor: true - always-run: false - optional: true - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest --mesh --short - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: istio-latest-mesh-tls - needs-monitor: true - always-run: false - optional: true - args: - - --run-test - - ./test/e2e-auto-tls-tests.sh --istio-version latest --mesh - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: istio-latest-no-mesh - needs-monitor: true - always-run: true - optional: false - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest --no-mesh - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: istio-latest-no-mesh-tls - needs-monitor: true - always-run: true - optional: false - args: - - --run-test - - ./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: kourier-stable - needs-monitor: true - always-run: false - run-if-changed: ^third_party/kourier-latest/* - optional: false - args: - - --run-test - - ./test/e2e-tests.sh --kourier-version stable - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: kourier-stable-tls - needs-monitor: true - always-run: false - run-if-changed: ^third_party/kourier-latest/* - optional: false - args: - - --run-test - - ./test/e2e-auto-tls-tests.sh --kourier-version stable - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: contour-latest - needs-monitor: true - always-run: false - run-if-changed: ^third_party/contour-latest/* - args: - - --run-test - - ./test/e2e-tests.sh --contour-version latest - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: contour-tls - needs-monitor: true - always-run: false - run-if-changed: ^third_party/contour-latest/* - args: - - --run-test - - ./test/e2e-auto-tls-tests.sh --contour-version latest - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: gateway-api-latest - needs-monitor: true - always-run: false - run-if-changed: ^third_party/gateway-api-latest/* - args: - - --run-test - - ./test/e2e-tests.sh --gateway-api-version latest - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-test: https - always-run: false - run-if-changed: ^third_party/cert-manager-latest/* - optional: true - args: - - 
--run-test - - ./test/e2e-tests.sh --https - - --run-test - - ./test/e2e-auto-tls-tests.sh --https - knative/client: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - - custom-test: integration-tests-latest-release - always-run: true - command: - - ./test/presubmit-integration-tests-latest-release.sh - knative/client-pkg: - - build-tests: true - - unit-tests: true - - go-coverage: false - knative/client-contrib: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/reconciler-test: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-diag: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-event: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-migration: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-operator: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-sample: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-service-log: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-source-kafka: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/kn-plugin-source-kamelet: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/kn-plugin-admin: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/kn-plugin-quickstart: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative/eventing: - - repo-settings: null - - build-tests: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - unit-tests: true - - integration-tests: true - needs-monitor: true - args: - - --run-test - - ./test/e2e-tests.sh - - custom-test: reconciler-tests - needs-monitor: true - optional: true - args: - - --run-test - - ./test/e2e-rekt-tests.sh - - custom-test: conformance-tests - needs-monitor: true - args: - - --run-test - - ./test/e2e-conformance-tests.sh - - custom-test: upgrade-tests - needs-monitor: true - args: - - --run-test - - ./test/e2e-upgrade-tests.sh - - go-coverage: true - knative/docs: - - build-tests: true - - unit-tests: true - - go-coverage: true - knative/pkg: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative/test-infra: - - build-tests: true - - unit-tests: true - - go-coverage: true - knative/hack: - - build-tests: true - - unit-tests: true - - integration-tests: true - args: - - --run-test - - ./test/e2e-tests.sh - - custom-test: kind-tests - always-run: true - needs-dind: true - args: - - --run-test - - ./test/e2e-kind.sh - knative/caching: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/sample-controller: - - build-tests: true - - unit-tests: true - knative-sandbox/sample-source: - - build-tests: true - - unit-tests: true - google/knative-gcp: - - repo-settings: null - - build-tests: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - unit-tests: true - - integration-tests: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - needs-monitor: true - args: - - --run-test - - ./test/e2e-tests.sh - env-vars: - - 
ENABLE_AUTH_CHECK_TEST="true" - - custom-test: wi-tests - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - needs-monitor: true - args: - - --run-test - - ./test/e2e-wi-tests.sh - env-vars: - - ENABLE_AUTH_CHECK_TEST="true" - - custom-test: upgrade-tests - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - needs-monitor: true - optional: false - args: - - --run-test - - ./test/e2e-upgrade-tests.sh - - custom-test: conformance-tests - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - needs-monitor: true - args: - - --run-test - - ./test/e2e-conformance-tests.sh - - go-coverage: true - knative/networking: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/net-certmanager: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: true - knative-sandbox/net-contour: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/net-http01: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/net-gateway-api: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - knative-sandbox/net-istio: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: true - - custom-test: latest - optional: true - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest - - custom-test: latest-mesh - optional: true - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest --mesh - knative-sandbox/net-kourier: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: true - knative/website: - - build-tests: false - - unit-tests: false - - integration-tests: false - - go-coverage: false - knative/community: - - build-tests: false - - unit-tests: false - - integration-tests: false - - go-coverage: false - knative/specs: - - build-tests: false - - unit-tests: false - - integration-tests: false - - go-coverage: false - knative/operator: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false - - custom-test: upgrade-tests - args: - - --run-test - - ./test/e2e-upgrade-tests.sh - - custom-test: serving-upgrade-tests - args: - - --run-test - - ./test/e2e-serving-upgrade-tests.sh - - custom-test: eventing-upgrade-tests - args: - - --run-test - - ./test/e2e-eventing-upgrade-tests.sh - knative-sandbox/async-component: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: true - knative-sandbox/eventing-autoscaler-keda: - - custom-test: integration-test-kafka-source - args: - - --run-test - - ./test/e2e-tests.sh --kafka-source - optional: true - - custom-test: integration-test-kafka-mt-source - args: - - --run-test - - ./test/e2e-tests.sh --kafka-mt-source - optional: true - - build-tests: false - - unit-tests: false - - integration-tests: false - - go-coverage: false - knative-sandbox/discovery: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/eventing-camel: - - build-tests: false - - unit-tests: false - - integration-tests: false - knative-sandbox/eventing-kafka: - - custom-test: integration-test-channel-consolidated - args: - - --run-test - - ./test/e2e-tests.sh --consolidated - - custom-test: integration-test-channel-consolidated-tls - args: - - --run-test - - ./test/e2e-tests.sh --consolidated-tls - - custom-test: 
integration-test-channel-consolidated-sasl - args: - - --run-test - - ./test/e2e-tests.sh --consolidated-sasl - - custom-test: integration-test-channel-distributed - args: - - --run-test - - ./test/e2e-tests.sh --distributed - - custom-test: integration-test-mt-source - args: - - --run-test - - ./test/e2e-tests.sh --mt-source - optional: true - - custom-test: upgrade-tests - args: - - --run-test - - ./test/e2e-upgrade-tests.sh - - integration-tests: false - - build-tests: true - needs-dind: true - - unit-tests: true - needs-dind: true - - go-coverage: true - needs-dind: true - knative-sandbox/eventing-kafka-broker: - - build-tests: true - - unit-tests: true - - integration-tests: true - needs-dind: true - - custom-test: upgrade-tests - needs-dind: true - args: - - --run-test - - ./test/upgrade-tests.sh - - custom-test: reconciler-tests - needs-dind: true - args: - - --run-test - - ./test/reconciler-tests.sh - - go-coverage: true - - custom-test: channel-integration-tests-ssl - needs-dind: true - env-vars: - - EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO="SSL" - args: - - --run-test - - ./test/e2e-tests.sh - - custom-test: channel-integration-tests-sasl-ssl - needs-dind: true - env-vars: - - EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO="SASL_SSL" - args: - - --run-test - - ./test/e2e-tests.sh - - custom-test: channel-integration-tests-sasl-plain - needs-dind: true - env-vars: - - EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO="SASL_PLAIN" - args: - - --run-test - - ./test/e2e-tests.sh - - custom-test: channel-reconciler-tests-ssl - needs-dind: true - env-vars: - - EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO="SSL" - args: - - --run-test - - ./test/reconciler-tests.sh - - custom-test: channel-reconciler-tests-sasl-ssl - needs-dind: true - env-vars: - - EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO="SASL_SSL" - args: - - --run-test - - ./test/reconciler-tests.sh - - custom-test: channel-reconciler-tests-sasl-plain - needs-dind: true - env-vars: - - EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO="SASL_PLAIN" - args: - - --run-test - - ./test/reconciler-tests.sh - knative-sandbox/eventing-rabbitmq: - - build-tests: false - - unit-tests: false - - integration-tests: false - knative-sandbox/eventing-natss: - - build-tests: false - - unit-tests: false - - integration-tests: false - knative-sandbox/kperf: - - build-tests: true - - unit-tests: true - - integration-tests: true - knative-sandbox/monitoring: - - build-tests: false - - unit-tests: false - - integration-tests: false - knative-sandbox/eventing-kogito: - - build-tests: true - - unit-tests: true - - integration-tests: false - knative-sandbox/container-freezer: - - build-tests: true - - unit-tests: true - - integration-tests: true - - go-coverage: false -periodics: - knative/serving: - - continuous: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - branch-ci: true - release: "0.26" - - branch-ci: true - release: "1.0" - - branch-ci: true - release: "1.1" - - branch-ci: true - release: "1.2" - - custom-job: istio-latest-mesh - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest --mesh - - --run-test - - ./test/e2e-auto-tls-tests.sh --istio-version latest --mesh - - custom-job: istio-latest-no-mesh - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --istio-version latest --no-mesh - - --run-test - - ./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh --run-http01-auto-tls-tests - - custom-job: istio-head-mesh - command: - - 
./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --istio-version head --mesh - - --run-test - - ./test/e2e-auto-tls-tests.sh --istio-version head --mesh - - custom-job: istio-head-no-mesh - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --istio-version head --no-mesh - - --run-test - - ./test/e2e-auto-tls-tests.sh --istio-version head --no-mesh - - custom-job: kourier-stable - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --kourier-version stable - - --run-test - - ./test/e2e-auto-tls-tests.sh --kourier-version stable --run-http01-auto-tls-tests - - custom-job: contour-latest - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --contour-version latest - - --run-test - - ./test/e2e-auto-tls-tests.sh --contour-version latest --run-http01-auto-tls-tests - - custom-job: gateway-api-latest - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --gateway-api-version latest - - custom-job: https - command: - - ./test/presubmit-tests.sh - args: - - --run-test - - ./test/e2e-tests.sh --https - - --run-test - - ./test/e2e-auto-tls-tests.sh --https - - custom-job: s390x-kourier-tests - cron: 0 3 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-kourier-tests - release: "1.0" - cron: 10 7 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-10) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-kourier-tests - release: "1.1" - cron: 20 11 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-11) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - 
- DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-kourier-tests - release: "1.2" - cron: 30 15 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-12) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-kourier-tests - release: "1.3" - cron: 40 19 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-13) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-contour-tests - cron: 0 5 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-contour-tests - release: "1.0" - cron: 10 9 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-10) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - 
PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-contour-tests - release: "1.1" - cron: 20 13 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-11) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-contour-tests - release: "1.2" - cron: 30 17 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-12) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-contour-tests - release: "1.3" - cron: 40 21 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-13) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest - env-vars: - - GO111MODULE="on" - - TEST_OPTIONS="--enable-alpha --enable-beta --resolvabledomain=false" - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-serving" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - nightly: true - reporter_config: - slack: - channel: serving-api - job_states_to_report: - - failure - report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"' - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "0.26" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.0" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.1" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.2" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - auto-release: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - knative/client: - - continuous: true - - branch-ci: 
true - release: "1.0" - - branch-ci: true - release: "1.1" - - branch-ci: true - release: "1.2" - - branch-ci: true - release: "1.3" - - nightly: true - - custom-job: tekton - cron: 0 13 * * * - command: ./test/tekton-tests.sh - - dot-release: true - release: "1.0" - - dot-release: true - release: "1.1" - - dot-release: true - release: "1.2" - - dot-release: true - release: "1.3" - - auto-release: true - - custom-job: s390x-e2e-tests - cron: 0 14 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests - env-vars: - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - - INGRESS_CLASS="contour.ingress.networking.knative.dev" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-e2e-tests - release: "1.0" - cron: 10 18 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-10 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests - env-vars: - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - - INGRESS_CLASS="contour.ingress.networking.knative.dev" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-e2e-tests - release: "1.1" - cron: 20 22 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-11 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests - env-vars: - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - - INGRESS_CLASS="contour.ingress.networking.knative.dev" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-e2e-tests - release: "1.2" - cron: 30 2 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-12 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests - env-vars: - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - - INGRESS_CLASS="contour.ingress.networking.knative.dev" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-e2e-tests - release: "1.3" - cron: 40 6 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-13 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests - env-vars: - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - 
PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - DOCKER_CONFIG="/opt/cluster" - - INGRESS_CLASS="contour.ingress.networking.knative.dev" - external_cluster: - secret: s390x-cluster1 - knative/client-pkg: - - continuous: true - - auto-release: true - - nightly: true - knative-sandbox/kn-plugin-diag: - - continuous: true - knative-sandbox/kn-plugin-event: - - continuous: true - - auto-release: true - - nightly: true - - dot-release: true - knative-sandbox/kn-plugin-func: - - auto-release: true - needs-dind: true - - nightly: true - needs-dind: true - - dot-release: true - needs-dind: true - knative-sandbox/kn-plugin-migration: - - continuous: true - knative-sandbox/kn-plugin-operator: - - continuous: true - knative-sandbox/kn-plugin-sample: - - continuous: true - knative-sandbox/kn-plugin-service-log: - - continuous: true - - auto-release: true - - nightly: true - - dot-release: true - knative-sandbox/kn-plugin-source-kafka: - - continuous: true - - auto-release: true - - nightly: true - - dot-release: true - release: "1.0" - - dot-release: true - release: "1.1" - - dot-release: true - release: "1.2" - - dot-release: true - release: "1.3" - knative-sandbox/kn-plugin-source-kamelet: - - continuous: true - - auto-release: true - - nightly: true - - dot-release: true - knative-sandbox/kn-plugin-admin: - - continuous: true - - auto-release: true - - nightly: true - - dot-release: true - knative-sandbox/kn-plugin-quickstart: - - continuous: true - - auto-release: true - - nightly: true - - dot-release: true - knative/docs: - - continuous: true - needs-dind: true - knative/eventing: - - continuous: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - branch-ci: true - release: "0.26" - - branch-ci: true - release: "1.0" - - branch-ci: true - release: "1.1" - - branch-ci: true - release: "1.2" - - branch-ci: true - release: "1.3" - - nightly: true - reporter_config: - slack: - channel: eventing - job_states_to_report: - - failure - report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"' - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "0.26" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.0" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.1" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.2" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - dot-release: true - release: "1.3" - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - auto-release: true - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - - custom-job: s390x-e2e-tests - cron: 0 7 * * * - command: - - bash - args: - - -c - - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests - env-vars: - - DISABLE_MD_LINTING="1" - - KO_FLAGS="--platform=linux/s390x" - - SYSTEM_NAMESPACE="knative-eventing" - - PLATFORM="linux/s390x" - - KUBECONFIG="/root/.kube/config" - - SCALE_CHAOSDUCK_TO_ZERO="1" - - DOCKER_CONFIG="/opt/cluster" - external_cluster: - secret: s390x-cluster1 - - custom-job: s390x-e2e-tests - release: "1.0" - cron: 10 11 * * * - command: - - bash - args: - - -c - - mkdir -p 
/root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-10 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - SYSTEM_NAMESPACE="knative-eventing"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - SCALE_CHAOSDUCK_TO_ZERO="1"
-    - DOCKER_CONFIG="/opt/cluster"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.1"
-    cron: 20 15 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-11 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - SYSTEM_NAMESPACE="knative-eventing"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - SCALE_CHAOSDUCK_TO_ZERO="1"
-    - DOCKER_CONFIG="/opt/cluster"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.2"
-    cron: 30 19 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-12 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - SYSTEM_NAMESPACE="knative-eventing"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - SCALE_CHAOSDUCK_TO_ZERO="1"
-    - DOCKER_CONFIG="/opt/cluster"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.3"
-    cron: 40 23 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-13 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - SYSTEM_NAMESPACE="knative-eventing"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - SCALE_CHAOSDUCK_TO_ZERO="1"
-    - DOCKER_CONFIG="/opt/cluster"
-    external_cluster:
-      secret: s390x-cluster1
-  knative-sandbox/eventing-awssqs:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  - dot-release: true
-    release: "0.26"
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  knative-sandbox/eventing-ceph:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-    needs-dind: true
-  knative-sandbox/eventing-couchdb:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  - dot-release: true
-    release: "0.25"
-  - dot-release: true
-    release: "0.26"
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  knative-sandbox/eventing-github:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  knative-sandbox/eventing-gitlab:
-  - continuous: true
-  - nightly: true
-  - dot-release: true
-    release: "1.0"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - dot-release: true
-    release: "1.1"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - dot-release: true
-    release: "1.2"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - dot-release: true
-    release: "1.3"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - auto-release: true
-  knative-sandbox/eventing-prometheus:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  - dot-release: true
-    release: "0.25"
-  - dot-release: true
-    release: "0.26"
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  knative-sandbox/eventing-redis:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  knative-sandbox/kperf:
-  - continuous: true
-  knative/pkg:
-  - continuous: true
-  knative/caching:
-  - continuous: true
-  knative-sandbox/sample-controller:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  knative-sandbox/sample-source:
-  - continuous: true
-  - nightly: true
-  - auto-release: true
-  knative/test-infra:
-  - continuous: true
-    needs-dind: true
-  google/knative-gcp:
-  - nightly: true
-    args:
-    - --publish
-    - --tag-release
-    - --release-gcs
-    - knative-gcp-nightly
-    - --release-gcr
-    - gcr.io/knative-gcp-nightly
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - auto-release: true
-    args:
-    - --auto-release
-    - --release-gcs
-    - knative-gcp
-    - --release-gcr
-    - gcr.io/knative-gcp
-    - --github-token
-    - /etc/hub-token/token
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  knative-sandbox/net-certmanager:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: net-certmanager
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  - auto-release: true
-  knative-sandbox/net-contour:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: net-contour
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  - auto-release: true
-  knative-sandbox/net-gateway-api:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: net-gateway-api
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"'
-  - auto-release: true
-  knative-sandbox/net-http01:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: net-http01
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-  - auto-release: true
-  knative-sandbox/net-istio:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: net-istio
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"'
-  - custom-job: latest
-    command:
-    - ./test/presubmit-tests.sh
-    args:
-    - --run-test
-    - ./test/e2e-tests.sh --istio-version latest
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  - auto-release: true
-  knative-sandbox/net-kourier:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: net-kourier
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-  - auto-release: true
-  knative/operator:
-  - continuous: true
-  - branch-ci: true
-    release: "1.0"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - branch-ci: true
-    release: "1.1"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - branch-ci: true
-    release: "1.2"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - branch-ci: true
-    release: "1.3"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - nightly: true
-  - dot-release: true
-    release: "1.0"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - dot-release: true
-    release: "1.1"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - dot-release: true
-    release: "1.2"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - dot-release: true
-    release: "1.3"
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-  - auto-release: true
-  - custom-job: s390x-e2e-tests
-    cron: 0 16 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-main && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - DOCKER_CONFIG="/opt/cluster"
-    - INGRESS_CLASS="contour.ingress.networking.knative.dev"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.0"
-    cron: 10 20 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-10 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - DOCKER_CONFIG="/opt/cluster"
-    - INGRESS_CLASS="contour.ingress.networking.knative.dev"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.1"
-    cron: 20 0 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-11 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - DOCKER_CONFIG="/opt/cluster"
-    - INGRESS_CLASS="contour.ingress.networking.knative.dev"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.2"
-    cron: 30 4 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-12 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - DOCKER_CONFIG="/opt/cluster"
-    - INGRESS_CLASS="contour.ingress.networking.knative.dev"
-    external_cluster:
-      secret: s390x-cluster1
-  - custom-job: s390x-e2e-tests
-    release: "1.3"
-    cron: 40 8 * * *
-    command:
-    - bash
-    args:
-    - -c
-    - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-13 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
-    env-vars:
-    - DISABLE_MD_LINTING="1"
-    - KO_FLAGS="--platform=linux/s390x"
-    - PLATFORM="linux/s390x"
-    - KUBECONFIG="/root/.kube/config"
-    - DOCKER_CONFIG="/opt/cluster"
-    - INGRESS_CLASS="contour.ingress.networking.knative.dev"
-    external_cluster:
-      secret: s390x-cluster1
-  knative-sandbox/async-component:
-  - continuous: true
-  - nightly: true
-  - dot-release: true
-  - auto-release: true
-  knative-sandbox/discovery:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: eventing-sources
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for discovery failed, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-  - auto-release: true
-  knative-sandbox/eventing-camel:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: eventing-sources
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for camel failed, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-  - auto-release: true
-  knative-sandbox/eventing-kafka:
-  - continuous: true
-    needs-dind: true
-  - nightly: true
-    needs-dind: true
-    reporter_config:
-      slack:
-        channel: eventing-kafka
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for eventing-kafka failed, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-    release: "1.0"
-    needs-dind: true
-  - dot-release: true
-    release: "1.1"
-    needs-dind: true
-  - dot-release: true
-    release: "1.2"
-    needs-dind: true
-  - dot-release: true
-    release: "1.3"
-    needs-dind: true
-  - auto-release: true
-    resources:
-      requests:
-        memory: 12Gi
-      limits:
-        memory: 16Gi
-    needs-dind: true
-  knative-sandbox/eventing-kafka-broker:
-  - continuous: true
-    needs-dind: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: eventing-kafka
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for eventing-kafka-broker failed, check the log: <{{.Status.URL}}|View logs>"'
-    needs-dind: true
-  - dot-release: true
-    release: "1.0"
-    needs-dind: true
-  - dot-release: true
-    release: "1.1"
-    needs-dind: true
-  - dot-release: true
-    release: "1.2"
-    needs-dind: true
-  - dot-release: true
-    release: "1.3"
-    needs-dind: true
-  - auto-release: true
-    needs-dind: true
-  knative-sandbox/eventing-rabbitmq:
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: eventing-rabbitmq
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for eventing-rabbitmq failed, check the log: <{{.Status.URL}}|View logs>"'
-  - auto-release: true
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  knative-sandbox/eventing-natss:
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: eventing
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for eventing-natss failed, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-    release: "1.0"
-  - dot-release: true
-    release: "1.1"
-  - dot-release: true
-    release: "1.2"
-  - dot-release: true
-    release: "1.3"
-  - auto-release: true
-  knative-sandbox/eventing-autoscaler-keda:
-  - continuous: true
-  - nightly: true
-  - dot-release: true
-  - auto-release: true
-  knative-sandbox/eventing-kogito:
-  - continuous: true
-  - nightly: true
-    reporter_config:
-      slack:
-        channel: eventing-sources
-        job_states_to_report:
-        - failure
-        report_template: '"The nightly release job for Kogito failed, check the log: <{{.Status.URL}}|View logs>"'
-  - dot-release: true
-  - auto-release: true
-  knative-sandbox/container-freezer:
-  - continuous: true
-  - nightly: true
-  - dot-release: true
-  - auto-release: true
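Every `custom-job` entry deleted above follows the same shape, so it is worth spelling out once. The sketch below annotates one of the eventing entries; the field meanings are inferred from the entries themselves, and the comments are editorial rather than part of the original config:

```yaml
# A commented sketch of the recurring s390x custom-job entry (inferred
# from the deleted entries above, not an authoritative schema).
- custom-job: s390x-e2e-tests
  release: "1.3"        # release branch to test; omitted on the main branch
  cron: 40 23 * * *     # schedules appear staggered across the s390x jobs
  command:
  - bash
  args:
  - -c
  # One-liner: log in to the external s390x cluster with the helper script
  # shipped in the mounted secret, pull the per-repo adjustment script out
  # of a ConfigMap on that cluster, apply it, then run the e2e suite.
  - mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-13 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests
  env-vars:
  - KO_FLAGS="--platform=linux/s390x"   # cross-build images for s390x
  - KUBECONFIG="/root/.kube/config"     # written by connect.sh
  - DOCKER_CONFIG="/opt/cluster"        # registry credentials from the secret
  external_cluster:
    secret: s390x-cluster1              # secret mounted at /opt/cluster
```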
diff --git a/prow/jobs/config.yaml b/prow/jobs/config.yaml
deleted file mode 100644
index 3a2e69d61c4..00000000000
--- a/prow/jobs/config.yaml
+++ /dev/null
@@ -1,20751 +0,0 @@
-# Copyright 2020 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# #######################################################################
-# ####                                                              ####
-# ####       THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.     ####
-# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
-# ####                                                              ####
-# #######################################################################
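The 20,751-line file removed below was the generator's output: each short entry in the hand-maintained config above expands into one or more complete Prow job specs. A minimal illustration of that relationship, assuming nothing about the generator beyond what the deleted output itself shows:

```yaml
# Illustrative pairing only (not the generator's exact algorithm):
# an input entry such as
#
#   knative/serving:
#   - continuous: true
#
# corresponds to fully expanded job specs of the shape deleted below,
# with names, contexts, and triggers derived from the org/repo:
presubmits:
  knative/serving:
  - name: pull-knative-serving-build-tests
    rerun_command: "/test pull-knative-serving-build-tests"
    trigger: "(?m)^/test (all|pull-knative-serving-build-tests),?(\\s+|$)"
    spec:                          # shared pod template for all jobs
      containers:
      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
        command: [runner.sh]
        args: ["./test/presubmit-tests.sh", "--build-tests"]
```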
-presubmits:
-  knative/serving:
-  - name: pull-knative-serving-build-tests
-    agent: kubernetes
-    context: pull-knative-serving-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-serving-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-serving-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-unit-tests
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-unit-tests
-    context: pull-knative-serving-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-serving-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-serving-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-upgrade-tests
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-upgrade-tests
-    context: pull-knative-serving-upgrade-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-serving-upgrade-tests"
-    trigger: "(?m)^/test (all|pull-knative-serving-upgrade-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-upgrade-tests.sh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-performance-tests-kperf
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-performance-tests-kperf
-    context: pull-knative-serving-performance-tests-kperf
-    always_run: false
-    optional: true
-    rerun_command: "/test pull-knative-serving-performance-tests-kperf"
-    trigger: "(?m)^/test (all|pull-knative-serving-performance-tests-kperf),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/performance/performance-tests.sh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-istio-latest-mesh
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-istio-latest-mesh
-    context: pull-knative-serving-istio-latest-mesh
-    always_run: false
-    optional: true
-    rerun_command: "/test pull-knative-serving-istio-latest-mesh"
-    trigger: "(?m)^/test (all|pull-knative-serving-istio-latest-mesh),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --istio-version latest --mesh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-istio-latest-mesh-short
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-istio-latest-mesh-short
-    context: pull-knative-serving-istio-latest-mesh-short
-    always_run: false
-    optional: true
-    rerun_command: "/test pull-knative-serving-istio-latest-mesh-short"
-    trigger: "(?m)^/test (all|pull-knative-serving-istio-latest-mesh-short),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --istio-version latest --mesh --short"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-istio-latest-mesh-tls
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-istio-latest-mesh-tls
-    context: pull-knative-serving-istio-latest-mesh-tls
-    always_run: false
-    optional: true
-    rerun_command: "/test pull-knative-serving-istio-latest-mesh-tls"
-    trigger: "(?m)^/test (all|pull-knative-serving-istio-latest-mesh-tls),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-auto-tls-tests.sh --istio-version latest --mesh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-istio-latest-no-mesh
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-istio-latest-no-mesh
-    context: pull-knative-serving-istio-latest-no-mesh
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-serving-istio-latest-no-mesh"
-    trigger: "(?m)^/test (all|pull-knative-serving-istio-latest-no-mesh),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --istio-version latest --no-mesh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-istio-latest-no-mesh-tls
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-istio-latest-no-mesh-tls
-    context: pull-knative-serving-istio-latest-no-mesh-tls
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-serving-istio-latest-no-mesh-tls"
-    trigger: "(?m)^/test (all|pull-knative-serving-istio-latest-no-mesh-tls),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-kourier-stable
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-kourier-stable
-    context: pull-knative-serving-kourier-stable
-    always_run: false
-    optional: false
-    run_if_changed: "^third_party/kourier-latest/*"
-    rerun_command: "/test pull-knative-serving-kourier-stable"
-    trigger: "(?m)^/test (all|pull-knative-serving-kourier-stable),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --kourier-version stable"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-kourier-stable-tls
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-kourier-stable-tls
-    context: pull-knative-serving-kourier-stable-tls
-    always_run: false
-    optional: false
-    run_if_changed: "^third_party/kourier-latest/*"
-    rerun_command: "/test pull-knative-serving-kourier-stable-tls"
-    trigger: "(?m)^/test (all|pull-knative-serving-kourier-stable-tls),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-auto-tls-tests.sh --kourier-version stable"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-contour-latest
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-contour-latest
-    context: pull-knative-serving-contour-latest
-    always_run: false
-    optional: false
-    run_if_changed: "^third_party/contour-latest/*"
-    rerun_command: "/test pull-knative-serving-contour-latest"
-    trigger: "(?m)^/test (all|pull-knative-serving-contour-latest),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --contour-version latest"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-contour-tls
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-contour-tls
-    context: pull-knative-serving-contour-tls
-    always_run: false
-    optional: false
-    run_if_changed: "^third_party/contour-latest/*"
-    rerun_command: "/test pull-knative-serving-contour-tls"
-    trigger: "(?m)^/test (all|pull-knative-serving-contour-tls),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-auto-tls-tests.sh --contour-version latest"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-gateway-api-latest
-    agent: kubernetes
-    labels:
-      prow.k8s.io/pubsub.project: knative-tests
-      prow.k8s.io/pubsub.topic: knative-monitoring
-      prow.k8s.io/pubsub.runID: pull-knative-serving-gateway-api-latest
-    context: pull-knative-serving-gateway-api-latest
-    always_run: false
-    optional: false
-    run_if_changed: "^third_party/gateway-api-latest/*"
-    rerun_command: "/test pull-knative-serving-gateway-api-latest"
-    trigger: "(?m)^/test (all|pull-knative-serving-gateway-api-latest),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --gateway-api-version latest"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-        resources:
-          requests:
-            memory: 12Gi
-          limits:
-            memory: 16Gi
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-serving-https
-    agent: kubernetes
-    context: pull-knative-serving-https
-    always_run: false
-    optional: true
-    run_if_changed: "^third_party/cert-manager-latest/*"
-    rerun_command: "/test pull-knative-serving-https"
-    trigger: "(?m)^/test (all|pull-knative-serving-https),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/serving
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh --https"
-        - "--run-test"
-        - "./test/e2e-auto-tls-tests.sh --https"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative/client:
-  - name: pull-knative-client-build-tests
-    agent: kubernetes
-    context: pull-knative-client-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-client-unit-tests
-    agent: kubernetes
-    context: pull-knative-client-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-client-integration-tests
-    agent: kubernetes
-    context: pull-knative-client-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-client-integration-tests-latest-release
-    agent: kubernetes
-    context: pull-knative-client-integration-tests-latest-release
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-integration-tests-latest-release"
-    trigger: "(?m)^/test (all|pull-knative-client-integration-tests-latest-release),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-integration-tests-latest-release.sh"
-        volumeMounts:
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative/client-pkg:
-  - name: pull-knative-client-pkg-build-tests
-    agent: kubernetes
-    context: pull-knative-client-pkg-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-pkg-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-pkg-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client-pkg
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-client-pkg-unit-tests
-    agent: kubernetes
-    context: pull-knative-client-pkg-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-pkg-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-pkg-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client-pkg
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
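Every generated presubmit in this file wires GitHub comments to job runs through the same pair of fields; restated once here for readability (with `<job>` standing in for the job's `name`):

```yaml
# Shared convention across the generated presubmits above and below:
rerun_command: "/test <job>"                   # the comment that re-runs it
trigger: "(?m)^/test (all|<job>),?(\\s+|$)"    # also matched by "/test all"
```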
-  knative/client-contrib:
-  - name: pull-knative-client-contrib-build-tests
-    agent: kubernetes
-    context: pull-knative-client-contrib-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-contrib-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-contrib-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client-contrib
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-client-contrib-unit-tests
-    agent: kubernetes
-    context: pull-knative-client-contrib-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-contrib-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-contrib-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client-contrib
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-client-contrib-integration-tests
-    agent: kubernetes
-    context: pull-knative-client-contrib-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-client-contrib-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-client-contrib-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/client-contrib
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/reconciler-test:
-  - name: pull-knative-sandbox-reconciler-test-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-reconciler-test-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-reconciler-test-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-reconciler-test-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/reconciler-test
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-reconciler-test-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-reconciler-test-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-reconciler-test-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-reconciler-test-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/reconciler-test
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-reconciler-test-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-reconciler-test-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-reconciler-test-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-reconciler-test-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/reconciler-test
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-diag:
-  - name: pull-knative-sandbox-kn-plugin-diag-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-diag-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-diag-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-diag-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-diag
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-diag-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-diag-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-diag-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-diag-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-diag
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-diag-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-diag-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-diag-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-diag-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-diag
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-event:
-  - name: pull-knative-sandbox-kn-plugin-event-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-event-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-event-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-event-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-event
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-event-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-event-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-event-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-event-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-event
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-event-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-event-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-event-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-event-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-event
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-migration:
-  - name: pull-knative-sandbox-kn-plugin-migration-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-migration-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-migration-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-migration-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-migration
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-migration-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-migration-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-migration-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-migration-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-migration
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-migration-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-migration-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-migration-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-migration-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-migration
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-operator:
-  - name: pull-knative-sandbox-kn-plugin-operator-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-operator-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-operator-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-operator-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-operator
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-operator-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-operator-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-operator-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-operator-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-operator
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-operator-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-operator-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-operator-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-operator-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-operator
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-sample:
-  - name: pull-knative-sandbox-kn-plugin-sample-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-sample-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-sample-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-sample-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-sample
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-sample-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-sample-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-sample-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-sample-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-sample
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-sample-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-sample-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-sample-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-sample-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-sample
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-service-log:
-  - name: pull-knative-sandbox-kn-plugin-service-log-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-service-log-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-service-log-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-service-log-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-service-log
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-service-log-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-service-log-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-service-log-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-service-log-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-service-log
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-service-log-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-service-log-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-service-log-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-service-log-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-service-log
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-source-kafka:
-  - name: pull-knative-sandbox-kn-plugin-source-kafka-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-source-kafka-build-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-source-kafka-build-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-source-kafka-build-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-source-kafka
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--build-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-source-kafka-unit-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-source-kafka-unit-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-source-kafka-unit-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-source-kafka-unit-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-source-kafka
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--unit-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-kn-plugin-source-kafka-integration-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-source-kafka-integration-tests
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-kn-plugin-source-kafka-integration-tests"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-source-kafka-integration-tests),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/kn-plugin-source-kafka
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--integration-tests"
-        volumeMounts:
-        - name: repoview-token
-          mountPath: /etc/repoview-token
-          readOnly: true
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: repoview-token
-        secret:
-          secretName: repoview-token
-      - name: test-account
-        secret:
-          secretName: test-account
-  knative-sandbox/kn-plugin-source-kamelet:
-  - name: pull-knative-sandbox-kn-plugin-source-kamelet-build-tests
-    agent: kubernetes
-    context: pull-knative-sandbox-kn-plugin-source-kamelet-build-tests
-    always_run: true
-    optional: false
rerun_command: "/test pull-knative-sandbox-kn-plugin-source-kamelet-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-source-kamelet-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-source-kamelet - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-kn-plugin-source-kamelet-unit-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-source-kamelet-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-source-kamelet-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-source-kamelet-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-source-kamelet - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-kn-plugin-source-kamelet-integration-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-source-kamelet-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-source-kamelet-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-source-kamelet-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-source-kamelet - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/kn-plugin-admin: - - name: pull-knative-sandbox-kn-plugin-admin-build-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-admin-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-admin-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-admin-build-tests),?(\\s+|$)" - decorate: true - path_alias: 
knative.dev/kn-plugin-admin - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-kn-plugin-admin-unit-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-admin-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-admin-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-admin-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-admin - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-kn-plugin-admin-integration-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-admin-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-admin-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-admin-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-admin - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/kn-plugin-quickstart: - - name: pull-knative-sandbox-kn-plugin-quickstart-build-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-quickstart-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-quickstart-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-quickstart-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-quickstart - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: 
repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-kn-plugin-quickstart-unit-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-quickstart-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-quickstart-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-quickstart-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-quickstart - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-kn-plugin-quickstart-integration-tests - agent: kubernetes - context: pull-knative-sandbox-kn-plugin-quickstart-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-kn-plugin-quickstart-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-kn-plugin-quickstart-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/kn-plugin-quickstart - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative/eventing: - - name: pull-knative-eventing-build-tests - agent: kubernetes - context: pull-knative-eventing-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-eventing-build-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: - 
memory: 16Gi - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-eventing-unit-tests - agent: kubernetes - context: pull-knative-eventing-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-eventing-unit-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-eventing-integration-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-knative-eventing-integration-tests - context: pull-knative-eventing-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-eventing-integration-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-eventing-reconciler-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-knative-eventing-reconciler-tests - context: pull-knative-eventing-reconciler-tests - always_run: true - optional: true - rerun_command: "/test pull-knative-eventing-reconciler-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-reconciler-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-rekt-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-eventing-conformance-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - 
prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-knative-eventing-conformance-tests - context: pull-knative-eventing-conformance-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-eventing-conformance-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-conformance-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-conformance-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-eventing-upgrade-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-knative-eventing-upgrade-tests - context: pull-knative-eventing-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-eventing-upgrade-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-upgrade-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-upgrade-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-eventing-go-coverage - agent: kubernetes - context: pull-knative-eventing-go-coverage - always_run: true - rerun_command: "/test pull-knative-eventing-go-coverage" - trigger: "(?m)^/test (all|pull-knative-eventing-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/eventing - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-eventing-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative/docs: - - name: pull-knative-docs-build-tests - agent: kubernetes - context: pull-knative-docs-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-docs-build-tests" - trigger: "(?m)^/test (all|pull-knative-docs-build-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - 
name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-docs-unit-tests - agent: kubernetes - context: pull-knative-docs-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-docs-unit-tests" - trigger: "(?m)^/test (all|pull-knative-docs-unit-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-docs-go-coverage - agent: kubernetes - context: pull-knative-docs-go-coverage - always_run: true - rerun_command: "/test pull-knative-docs-go-coverage" - trigger: "(?m)^/test (all|pull-knative-docs-go-coverage),?(\\s+|$)" - optional: true - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-docs-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative/pkg: - - name: pull-knative-pkg-build-tests - agent: kubernetes - context: pull-knative-pkg-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-pkg-build-tests" - trigger: "(?m)^/test (all|pull-knative-pkg-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/pkg - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-pkg-unit-tests - agent: kubernetes - context: pull-knative-pkg-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-pkg-unit-tests" - trigger: "(?m)^/test (all|pull-knative-pkg-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/pkg - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - 
mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-pkg-integration-tests - agent: kubernetes - context: pull-knative-pkg-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-pkg-integration-tests" - trigger: "(?m)^/test (all|pull-knative-pkg-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/pkg - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative/test-infra: - - name: pull-knative-test-infra-build-tests - agent: kubernetes - context: pull-knative-test-infra-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-test-infra-build-tests" - trigger: "(?m)^/test (all|pull-knative-test-infra-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/test-infra - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-test-infra-unit-tests - agent: kubernetes - context: pull-knative-test-infra-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-test-infra-unit-tests" - trigger: "(?m)^/test (all|pull-knative-test-infra-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/test-infra - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-test-infra-go-coverage - agent: kubernetes - context: pull-knative-test-infra-go-coverage - always_run: 
true - rerun_command: "/test pull-knative-test-infra-go-coverage" - trigger: "(?m)^/test (all|pull-knative-test-infra-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/test-infra - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-test-infra-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative/hack: - - name: pull-knative-hack-build-tests - agent: kubernetes - context: pull-knative-hack-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-hack-build-tests" - trigger: "(?m)^/test (all|pull-knative-hack-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/hack - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-hack-unit-tests - agent: kubernetes - context: pull-knative-hack-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-hack-unit-tests" - trigger: "(?m)^/test (all|pull-knative-hack-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/hack - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-hack-integration-tests - agent: kubernetes - context: pull-knative-hack-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-hack-integration-tests" - trigger: "(?m)^/test (all|pull-knative-hack-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/hack - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: 
E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-hack-kind-tests - agent: kubernetes - context: pull-knative-hack-kind-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-hack-kind-tests" - trigger: "(?m)^/test (all|pull-knative-hack-kind-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/hack - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-kind.sh" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - knative/caching: - - name: pull-knative-caching-build-tests - agent: kubernetes - context: pull-knative-caching-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-caching-build-tests" - trigger: "(?m)^/test (all|pull-knative-caching-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/caching - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-caching-unit-tests - agent: kubernetes - context: pull-knative-caching-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-caching-unit-tests" - trigger: "(?m)^/test (all|pull-knative-caching-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/caching - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-caching-integration-tests - agent: kubernetes - context: pull-knative-caching-integration-tests - 
always_run: true - optional: false - rerun_command: "/test pull-knative-caching-integration-tests" - trigger: "(?m)^/test (all|pull-knative-caching-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/caching - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/sample-controller: - - name: pull-knative-sandbox-sample-controller-build-tests - agent: kubernetes - context: pull-knative-sandbox-sample-controller-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-sample-controller-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-sample-controller-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/sample-controller - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-sample-controller-unit-tests - agent: kubernetes - context: pull-knative-sandbox-sample-controller-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-sample-controller-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-sample-controller-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/sample-controller - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/sample-source: - - name: pull-knative-sandbox-sample-source-build-tests - agent: kubernetes - context: pull-knative-sandbox-sample-source-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-sample-source-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-sample-source-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/sample-source - cluster: "build-knative" - spec: - containers: - - 
image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-sample-source-unit-tests - agent: kubernetes - context: pull-knative-sandbox-sample-source-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-sample-source-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-sample-source-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/sample-source - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - google/knative-gcp: - - name: pull-google-knative-gcp-build-tests - agent: kubernetes - context: pull-google-knative-gcp-build-tests - always_run: true - optional: false - rerun_command: "/test pull-google-knative-gcp-build-tests" - trigger: "(?m)^/test (all|pull-google-knative-gcp-build-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-google-knative-gcp-unit-tests - agent: kubernetes - context: pull-google-knative-gcp-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-google-knative-gcp-unit-tests" - trigger: "(?m)^/test (all|pull-google-knative-gcp-unit-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - 
- name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-google-knative-gcp-integration-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-google-knative-gcp-integration-tests - context: pull-google-knative-gcp-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-google-knative-gcp-integration-tests" - trigger: "(?m)^/test (all|pull-google-knative-gcp-integration-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: ENABLE_AUTH_CHECK_TEST - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-google-knative-gcp-wi-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-google-knative-gcp-wi-tests - context: pull-google-knative-gcp-wi-tests - always_run: true - optional: false - rerun_command: "/test pull-google-knative-gcp-wi-tests" - trigger: "(?m)^/test (all|pull-google-knative-gcp-wi-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-wi-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: ENABLE_AUTH_CHECK_TEST - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-google-knative-gcp-upgrade-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-google-knative-gcp-upgrade-tests - context: pull-google-knative-gcp-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-google-knative-gcp-upgrade-tests" - trigger: "(?m)^/test (all|pull-google-knative-gcp-upgrade-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-upgrade-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: 
- memory: 16Gi - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-google-knative-gcp-conformance-tests - agent: kubernetes - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: pull-google-knative-gcp-conformance-tests - context: pull-google-knative-gcp-conformance-tests - always_run: true - optional: false - rerun_command: "/test pull-google-knative-gcp-conformance-tests" - trigger: "(?m)^/test (all|pull-google-knative-gcp-conformance-tests),?(\\s+|$)" - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-conformance-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-google-knative-gcp-go-coverage - agent: kubernetes - context: pull-google-knative-gcp-go-coverage - always_run: true - rerun_command: "/test pull-google-knative-gcp-go-coverage" - trigger: "(?m)^/test (all|pull-google-knative-gcp-go-coverage),?(\\s+|$)" - optional: true - decorate: true - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-google-knative-gcp-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative/networking: - - name: pull-knative-networking-build-tests - agent: kubernetes - context: pull-knative-networking-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-networking-build-tests" - trigger: "(?m)^/test (all|pull-knative-networking-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/networking - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-networking-unit-tests - agent: kubernetes - context: pull-knative-networking-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-networking-unit-tests" - trigger: "(?m)^/test (all|pull-knative-networking-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/networking - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - 
runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-networking-integration-tests - agent: kubernetes - context: pull-knative-networking-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-networking-integration-tests" - trigger: "(?m)^/test (all|pull-knative-networking-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/networking - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/net-certmanager: - - name: pull-knative-sandbox-net-certmanager-build-tests - agent: kubernetes - context: pull-knative-sandbox-net-certmanager-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-certmanager-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-certmanager-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-certmanager - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-certmanager-unit-tests - agent: kubernetes - context: pull-knative-sandbox-net-certmanager-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-certmanager-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-certmanager-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-certmanager - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: 
us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-certmanager-integration-tests - agent: kubernetes - context: pull-knative-sandbox-net-certmanager-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-certmanager-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-certmanager-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-certmanager - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-certmanager-go-coverage - agent: kubernetes - context: pull-knative-sandbox-net-certmanager-go-coverage - always_run: true - rerun_command: "/test pull-knative-sandbox-net-certmanager-go-coverage" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-certmanager-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/net-certmanager - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-sandbox-net-certmanager-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative-sandbox/net-contour: - - name: pull-knative-sandbox-net-contour-build-tests - agent: kubernetes - context: pull-knative-sandbox-net-contour-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-contour-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-contour-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-contour - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-contour-unit-tests - agent: kubernetes - context: pull-knative-sandbox-net-contour-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-contour-unit-tests" - trigger: "(?m)^/test 
(all|pull-knative-sandbox-net-contour-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-contour - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-contour-integration-tests - agent: kubernetes - context: pull-knative-sandbox-net-contour-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-contour-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-contour-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-contour - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/net-http01: - - name: pull-knative-sandbox-net-http01-build-tests - agent: kubernetes - context: pull-knative-sandbox-net-http01-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-http01-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-http01-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-http01 - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-http01-unit-tests - agent: kubernetes - context: pull-knative-sandbox-net-http01-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-http01-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-http01-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-http01 - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - 
mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-http01-integration-tests - agent: kubernetes - context: pull-knative-sandbox-net-http01-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-http01-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-http01-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-http01 - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/net-gateway-api: - - name: pull-knative-sandbox-net-gateway-api-build-tests - agent: kubernetes - context: pull-knative-sandbox-net-gateway-api-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-gateway-api-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-gateway-api-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-gateway-api - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-gateway-api-unit-tests - agent: kubernetes - context: pull-knative-sandbox-net-gateway-api-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-gateway-api-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-gateway-api-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-gateway-api - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: 
repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-gateway-api-integration-tests - agent: kubernetes - context: pull-knative-sandbox-net-gateway-api-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-gateway-api-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-gateway-api-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-gateway-api - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/net-istio: - - name: pull-knative-sandbox-net-istio-build-tests - agent: kubernetes - context: pull-knative-sandbox-net-istio-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-istio-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-istio-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-istio - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-istio-unit-tests - agent: kubernetes - context: pull-knative-sandbox-net-istio-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-istio-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-istio-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-istio - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-istio-integration-tests - agent: kubernetes - context: pull-knative-sandbox-net-istio-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-istio-integration-tests" - trigger: "(?m)^/test 
(all|pull-knative-sandbox-net-istio-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-istio - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-istio-go-coverage - agent: kubernetes - context: pull-knative-sandbox-net-istio-go-coverage - always_run: true - rerun_command: "/test pull-knative-sandbox-net-istio-go-coverage" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-istio-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/net-istio - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-sandbox-net-istio-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - - name: pull-knative-sandbox-net-istio-latest - agent: kubernetes - context: pull-knative-sandbox-net-istio-latest - always_run: true - optional: true - rerun_command: "/test pull-knative-sandbox-net-istio-latest" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-istio-latest),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-istio - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --istio-version latest" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-istio-latest-mesh - agent: kubernetes - context: pull-knative-sandbox-net-istio-latest-mesh - always_run: true - optional: true - rerun_command: "/test pull-knative-sandbox-net-istio-latest-mesh" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-istio-latest-mesh),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-istio - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --istio-version latest --mesh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - 
knative-sandbox/net-kourier: - - name: pull-knative-sandbox-net-kourier-build-tests - agent: kubernetes - context: pull-knative-sandbox-net-kourier-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-kourier-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-kourier-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-kourier - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-kourier-unit-tests - agent: kubernetes - context: pull-knative-sandbox-net-kourier-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-kourier-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-kourier-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-kourier - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-kourier-integration-tests - agent: kubernetes - context: pull-knative-sandbox-net-kourier-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-net-kourier-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-kourier-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/net-kourier - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-net-kourier-go-coverage - agent: kubernetes - context: pull-knative-sandbox-net-kourier-go-coverage - always_run: true - rerun_command: "/test pull-knative-sandbox-net-kourier-go-coverage" - trigger: "(?m)^/test (all|pull-knative-sandbox-net-kourier-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: 
knative.dev/net-kourier - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-sandbox-net-kourier-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative/operator: - - name: pull-knative-operator-build-tests - agent: kubernetes - context: pull-knative-operator-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-operator-build-tests" - trigger: "(?m)^/test (all|pull-knative-operator-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/operator - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-operator-unit-tests - agent: kubernetes - context: pull-knative-operator-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-operator-unit-tests" - trigger: "(?m)^/test (all|pull-knative-operator-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/operator - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-operator-integration-tests - agent: kubernetes - context: pull-knative-operator-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-operator-integration-tests" - trigger: "(?m)^/test (all|pull-knative-operator-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/operator - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: 
- secretName: test-account - - name: pull-knative-operator-upgrade-tests - agent: kubernetes - context: pull-knative-operator-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-operator-upgrade-tests" - trigger: "(?m)^/test (all|pull-knative-operator-upgrade-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/operator - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-upgrade-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-operator-serving-upgrade-tests - agent: kubernetes - context: pull-knative-operator-serving-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-operator-serving-upgrade-tests" - trigger: "(?m)^/test (all|pull-knative-operator-serving-upgrade-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/operator - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-serving-upgrade-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-operator-eventing-upgrade-tests - agent: kubernetes - context: pull-knative-operator-eventing-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-operator-eventing-upgrade-tests" - trigger: "(?m)^/test (all|pull-knative-operator-eventing-upgrade-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/operator - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-eventing-upgrade-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - knative-sandbox/async-component: - - name: pull-knative-sandbox-async-component-build-tests - agent: kubernetes - context: pull-knative-sandbox-async-component-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-async-component-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-async-component-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/async-component - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: 
test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-async-component-unit-tests - agent: kubernetes - context: pull-knative-sandbox-async-component-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-async-component-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-async-component-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/async-component - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-async-component-integration-tests - agent: kubernetes - context: pull-knative-sandbox-async-component-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-async-component-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-async-component-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/async-component - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-async-component-go-coverage - agent: kubernetes - context: pull-knative-sandbox-async-component-go-coverage - always_run: true - rerun_command: "/test pull-knative-sandbox-async-component-go-coverage" - trigger: "(?m)^/test (all|pull-knative-sandbox-async-component-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/async-component - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-sandbox-async-component-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - knative-sandbox/eventing-autoscaler-keda: - - name: pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-source - 
agent: kubernetes - context: pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-source - always_run: true - optional: true - rerun_command: "/test pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-source" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-source),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-autoscaler-keda - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --kafka-source" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-mt-source - agent: kubernetes - context: pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-mt-source - always_run: true - optional: true - rerun_command: "/test pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-mt-source" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-autoscaler-keda-integration-test-kafka-mt-source),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-autoscaler-keda - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --kafka-mt-source" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - knative-sandbox/discovery: - - name: pull-knative-sandbox-discovery-build-tests - agent: kubernetes - context: pull-knative-sandbox-discovery-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-discovery-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-discovery-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/discovery - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-discovery-unit-tests - agent: kubernetes - context: pull-knative-sandbox-discovery-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-discovery-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-discovery-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/discovery - cluster: "build-knative" - spec: - containers: 
- - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-discovery-integration-tests - agent: kubernetes - context: pull-knative-sandbox-discovery-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-discovery-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-discovery-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/discovery - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - knative-sandbox/eventing-kafka: - - name: pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --consolidated" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-tls - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-tls - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-tls" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-tls),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --consolidated-tls" - 
volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-sasl - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-sasl - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-sasl" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-integration-test-channel-consolidated-sasl),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --consolidated-sasl" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-integration-test-channel-distributed - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-integration-test-channel-distributed - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-integration-test-channel-distributed" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-integration-test-channel-distributed),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --distributed" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-integration-test-mt-source - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-integration-test-mt-source - always_run: true - optional: true - rerun_command: "/test pull-knative-sandbox-eventing-kafka-integration-test-mt-source" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-integration-test-mt-source),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh --mt-source" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-upgrade-tests - agent: kubernetes - context: 
pull-knative-sandbox-eventing-kafka-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-upgrade-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-upgrade-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-upgrade-tests.sh" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-build-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - securityContext: - privileged: true - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-unit-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - securityContext: - privileged: true - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - 
name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-go-coverage - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-go-coverage - always_run: true - rerun_command: "/test pull-knative-sandbox-eventing-kafka-go-coverage" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/eventing-kafka - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-sandbox-eventing-kafka-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - volumes: - - name: covbot-token - secret: - secretName: covbot-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - knative-sandbox/eventing-kafka-broker: - - name: pull-knative-sandbox-eventing-kafka-broker-build-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-build-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-build-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-build-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--build-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-broker-unit-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-unit-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-unit-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-unit-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--unit-tests" - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: 
/etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-broker-integration-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-integration-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-integration-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-integration-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--integration-tests" - securityContext: - privileged: true - volumeMounts: - - name: repoview-token - mountPath: /etc/repoview-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: repoview-token - secret: - secretName: repoview-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-broker-upgrade-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-upgrade-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-upgrade-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-upgrade-tests),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/upgrade-tests.sh" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-broker-reconciler-tests - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-reconciler-tests - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-reconciler-tests" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-reconciler-tests),?(\\s+|$)" - decorate: true - 
path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/reconciler-tests.sh" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - - name: pull-knative-sandbox-eventing-kafka-broker-go-coverage - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-go-coverage - always_run: true - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-go-coverage" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-go-coverage),?(\\s+|$)" - optional: true - decorate: true - path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=post-knative-sandbox-eventing-kafka-broker-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" - - "--github-token=/etc/covbot-token/token" - volumeMounts: - - name: covbot-token - mountPath: /etc/covbot-token - readOnly: true - volumes: - - name: covbot-token - secret: - secretName: covbot-token - - name: pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-ssl - agent: kubernetes - context: pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-ssl - always_run: true - optional: false - rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-ssl" - trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-ssl),?(\\s+|$)" - decorate: true - path_alias: knative.dev/eventing-kafka-broker - cluster: "build-knative" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--run-test" - - "./test/e2e-tests.sh" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO - value: "SSL" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account - - name: 
pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-sasl-ssl
-    agent: kubernetes
-    context: pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-sasl-ssl
-    always_run: true
-    optional: false
-    rerun_command: "/test pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-sasl-ssl"
-    trigger: "(?m)^/test (all|pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-sasl-ssl),?(\\s+|$)"
-    decorate: true
-    path_alias: knative.dev/eventing-kafka-broker
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "./test/presubmit-tests.sh"
-        - "--run-test"
-        - "./test/e2e-tests.sh"
-        securityContext:
-          privileged: true
-        volumeMounts:
-        - name: docker-graph
-          mountPath: /docker-graph
-        - name: modules
-          mountPath: /lib/modules
-        - name: cgroup
-          mountPath: /sys/fs/cgroup
-        - name: test-account
-          mountPath: /etc/test-account
-          readOnly: true
-        env:
-        - name: DOCKER_IN_DOCKER_ENABLED
-          value: "true"
-        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
-          value: "SASL_SSL"
-        - name: GOOGLE_APPLICATION_CREDENTIALS
-          value: /etc/test-account/service-account.json
-        - name: E2E_CLUSTER_REGION
-          value: us-central1
-      volumes:
-      - name: docker-graph
-        emptyDir: {}
-      - name: modules
-        hostPath:
-          path: /lib/modules
-          type: Directory
-      - name: cgroup
-        hostPath:
-          path: /sys/fs/cgroup
-          type: Directory
-      - name: test-account
-        secret:
-          secretName: test-account
-  - name: pull-knative-sandbox-eventing-kafka-broker-channel-integration-tests-sasl-plain
    [… identical to the sasl-ssl integration job above, with "sasl-plain" substituted in the
    context, rerun_command and trigger, and EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO set
    to "SASL_PLAIN" …]
-  - name: pull-knative-sandbox-eventing-kafka-broker-channel-reconciler-tests-ssl
    [… same shape, but running "./test/reconciler-tests.sh" instead of e2e-tests, with
    EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO set to "SSL" …]
-  - name: pull-knative-sandbox-eventing-kafka-broker-channel-reconciler-tests-sasl-ssl
    [… reconciler-tests variant with auth scenario "SASL_SSL" …]
-  - name: pull-knative-sandbox-eventing-kafka-broker-channel-reconciler-tests-sasl-plain
    [… reconciler-tests variant with auth scenario "SASL_PLAIN" …]
-  knative-sandbox/kperf:
-  - name: pull-knative-sandbox-kperf-build-tests
    [… standard presubmit shape without Docker-in-Docker: runs "./test/presubmit-tests.sh
    --build-tests" with path_alias knative.dev/kperf, mounting the repoview-token and
    test-account secrets, with the usual GOOGLE_APPLICATION_CREDENTIALS and
    E2E_CLUSTER_REGION env …]
-  - name: pull-knative-sandbox-kperf-unit-tests
    [… as above with "--unit-tests" …]
-  - name: pull-knative-sandbox-kperf-integration-tests
    [… as above with "--integration-tests" …]
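Editor's note on the presubmits deleted above: each generated entry pairs a GitHub trigger
(context, rerun_command, trigger regex) with a Kubernetes pod spec, and the Docker-in-Docker
jobs add a privileged container plus the docker-graph/modules/cgroup mounts. A minimal
sketch of that shape, assuming a hypothetical repo and job name (only fields that actually
appear in this deleted config are used):

  presubmits:
    knative-sandbox/some-repo:                 # hypothetical repo key
    - name: pull-knative-sandbox-some-repo-unit-tests
      always_run: true                         # run on every PR push
      optional: false                          # a failure blocks merge
      rerun_command: "/test pull-knative-sandbox-some-repo-unit-tests"
      trigger: "(?m)^/test (all|pull-knative-sandbox-some-repo-unit-tests),?(\\s+|$)"
      decorate: true                           # use Prow's pod utilities
      path_alias: knative.dev/some-repo        # where the repo is checked out
      cluster: "build-knative"
      spec:
        containers:
        - image: gcr.io/knative-tests/test-infra/prow-tests:stable
          command: [runner.sh]
          args: ["./test/presubmit-tests.sh", "--unit-tests"]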
-  knative-sandbox/eventing-kogito:
-  - name: pull-knative-sandbox-eventing-kogito-build-tests
    [… standard presubmit: "./test/presubmit-tests.sh --build-tests" with path_alias
    knative.dev/eventing-kogito, mounting the repoview-token and test-account secrets …]
-  - name: pull-knative-sandbox-eventing-kogito-unit-tests
    [… as above with "--unit-tests" …]
-  knative-sandbox/container-freezer:
-  - name: pull-knative-sandbox-container-freezer-build-tests
    [… standard presubmit: "./test/presubmit-tests.sh --build-tests" with path_alias
    knative.dev/container-freezer …]
-  - name: pull-knative-sandbox-container-freezer-unit-tests
    [… as above with "--unit-tests" …]
-  - name: pull-knative-sandbox-container-freezer-integration-tests
    [… as above with "--integration-tests" …]
-periodics:
-- cron: "0 */12 * * *"
-  name: ci-knative-serving-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: serving
-    path_alias: knative.dev/serving
-    base_ref: main
-  annotations:
-    testgrid-dashboards: serving
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "1 8 * * *"
-  name: ci-knative-serving-0.26-continuous
    [… periodic against base_ref release-0.26, reporting to testgrid dashboard knative-0.26,
    tab serving-continuous: runs "./hack/release.sh --nopublish --notag-release" under
    Docker-in-Docker (privileged, docker-graph/modules/cgroup mounts) with
    PULL_BASE_REF=release-0.26 …]
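Editor's note on the periodics that follow: unlike presubmits they have no triggering PR, so
the repo under test is cloned through extra_refs, and the testgrid-* annotations (consumed
by TestGrid, not Prow) decide where results land. A minimal sketch, assuming a hypothetical
repo and dashboard name:

  periodics:
  - cron: "0 */12 * * *"             # schedule; the jobs below stagger minutes to spread load
    name: ci-some-repo-continuous    # hypothetical name
    decorate: true
    decoration_config:
      timeout: 180m
    cluster: "build-knative"
    extra_refs:                      # explicit clone, since there is no PR context
    - org: knative
      repo: some-repo
      path_alias: knative.dev/some-repo
      base_ref: main                 # the release-branch jobs pin release-x.y here
    annotations:
      testgrid-dashboards: some-dashboard
      testgrid-tab-name: continuous
      testgrid-alert-stale-results-hours: "3"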
-- cron: "22 8 * * *"
-  name: ci-knative-serving-1.0-continuous
    [… same release-branch continuous job against release-1.0, dashboard knative-1.0 …]
-- cron: "31 8 * * *"
-  name: ci-knative-serving-1.1-continuous
    [… against release-1.1, dashboard knative-1.1 …]
-- cron: "20 8 * * *"
-  name: ci-knative-serving-1.2-continuous
    [… against release-1.2, dashboard knative-1.2 …]
-- cron: "5 */9 * * *"
-  name: ci-knative-serving-istio-latest-mesh
    [… 120m periodic on main, testgrid tab istio-latest-mesh: runs
    "./test/e2e-tests.sh --istio-version latest --mesh" and
    "./test/e2e-auto-tls-tests.sh --istio-version latest --mesh" via
    "./test/presubmit-tests.sh --run-test" …]
-- cron: "31 */9 * * *"
-  name: ci-knative-serving-istio-latest-no-mesh
    [… as above with --no-mesh, and the auto-TLS run adds --run-http01-auto-tls-tests;
    testgrid tab istio-latest-no-mesh …]
-- cron: "26 */9 * * *"
-  name: ci-knative-serving-istio-head-mesh
    [… e2e and auto-TLS runs with --istio-version head --mesh; testgrid tab istio-head-mesh …]
-- cron: "42 */9 * * *"
-  name: ci-knative-serving-istio-head-no-mesh
    [… --istio-version head --no-mesh; testgrid tab istio-head-no-mesh …]
-- cron: "24 */9 * * *"
-  name: ci-knative-serving-kourier-stable
    [… e2e with --kourier-version stable, auto-TLS with --kourier-version stable
    --run-http01-auto-tls-tests; testgrid tab kourier-stable …]
-- cron: "43 */9 * * *"
-  name: ci-knative-serving-contour-latest
    [… e2e with --contour-version latest, auto-TLS with --contour-version latest
    --run-http01-auto-tls-tests; testgrid tab contour-latest …]
-- cron: "32 */9 * * *"
-  name: ci-knative-serving-gateway-api-latest
    [… e2e with --gateway-api-version latest; testgrid tab gateway-api-latest …]
-- cron: "42 */9 * * *"
-  name: ci-knative-serving-https
    [… e2e and auto-TLS with --https; testgrid tab https …]
-- cron: "0 3 * * *"
-  name: ci-knative-serving-s390x-kourier-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: serving
-    path_alias: knative.dev/serving
-    base_ref: main
-  annotations:
-    testgrid-dashboards: serving
-    testgrid-tab-name: s390x-kourier-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GO111MODULE
-        value: "on"
-      - name: TEST_OPTIONS
-        value: "--enable-alpha --enable-beta --resolvabledomain=false"
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: SYSTEM_NAMESPACE
-        value: "knative-serving"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "10 7 * * *"
-  name: ci-knative-serving-1.0-s390x-kourier-tests
    [… same s390x job against base_ref release-1.0 (connect.sh kourier-10), dashboard
    knative-1.0, tab serving-s390x-kourier-tests, PULL_BASE_REF=release-1.0 …]
-- cron: "20 11 * * *"
-  name: ci-knative-serving-1.1-s390x-kourier-tests
    [… release-1.1 / kourier-11, dashboard knative-1.1 …]
-- cron: "30 15 * * *"
-  name: ci-knative-serving-1.2-s390x-kourier-tests
    [… release-1.2 / kourier-12, dashboard knative-1.2 …]
-- cron: "40 19 * * *"
-  name: ci-knative-serving-1.3-s390x-kourier-tests
    [… release-1.3 / kourier-13, dashboard knative-1.3 …]
-- cron: "0 5 * * *"
-  name: ci-knative-serving-s390x-contour-tests
    [… contour variant of the s390x job on main (connect.sh contour-main,
    "./test/e2e-tests.sh --run-tests --contour-version latest"), tab s390x-contour-tests …]
-- cron: "10 9 * * *"
-  name: ci-knative-serving-1.0-s390x-contour-tests
    [… release-1.0 / contour-10, dashboard knative-1.0, tab serving-s390x-contour-tests …]
-- cron: "20 13 * * *"
-  name: ci-knative-serving-1.1-s390x-contour-tests
    [… release-1.1 / contour-11, dashboard knative-1.1 …]
-- cron: "30 17 * * *"
-  name: ci-knative-serving-1.2-s390x-contour-tests
    [… release-1.2 / contour-12, dashboard knative-1.2 …]
-- cron: "40 21 * * *"
-  name: ci-knative-serving-1.3-s390x-contour-tests
    [… release-1.3 / contour-13, dashboard knative-1.3 …]
-- cron: "20 9 * * *"
-  name: ci-knative-serving-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: serving-api
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: serving
-    path_alias: knative.dev/serving
-    base_ref: main
-  annotations:
-    testgrid-dashboards: serving
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "57 9 * * 2"
-  name: ci-knative-serving-0.26-dot-release
    [… weekly dot-release from release-0.26: "./hack/release.sh --dot-release --release-gcs
    knative-releases/serving --release-gcr gcr.io/knative-releases --github-token
    /etc/hub-token/token --branch release-0.26", mounting the hub-token and release-account
    secrets, with 12Gi/16Gi memory requests/limits; dashboard knative-0.26, tab
    serving-dot-release …]
-- cron: "20 9 * * 2"
-  name: ci-knative-serving-1.0-dot-release
    [… as above for release-1.0, dashboard knative-1.0 …]
-- cron: "43 9 * * 2"
-  name: ci-knative-serving-1.1-dot-release
    [… release-1.1, dashboard knative-1.1 …]
-- cron: "38 9 * * 2"
-  name: ci-knative-serving-1.2-dot-release
    [… release-1.2, dashboard knative-1.2 …]
-- cron: "20 */12 * * *"
-  name: ci-knative-serving-auto-release
    [… twice-daily auto-release on main: "./hack/release.sh --auto-release --release-gcs
    knative-releases/serving --release-gcr gcr.io/knative-releases --github-token
    /etc/hub-token/token"; alerts serverless-engprod-sea@google.com after 1 failure …]
-- cron: "53 */12 * * *"
-  name: ci-knative-client-continuous
    [… continuous job for knative/client on main running "./test/presubmit-tests.sh
    --all-tests"; dashboard client, tab continuous …]
-- cron: "27 8 * * *"
-  name: ci-knative-client-1.0-continuous
    [… release-branch continuous job against release-1.0 running "./hack/release.sh
    --nopublish --notag-release" under Docker-in-Docker; dashboard knative-1.0, tab
    client-continuous …]
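Editor's note: the serving and client release periodics above and below come in three
flavors that differ mainly in the arguments passed to ./hack/release.sh. An editorial
comparison (not a config stanza; "serving" and "release-1.0" stand in for the repo and
branch of the job at hand):

  nightly: ["./hack/release.sh", "--publish", "--tag-release"]
  dot:     ["./hack/release.sh", "--dot-release",
            "--release-gcs", "knative-releases/serving",
            "--release-gcr", "gcr.io/knative-releases",
            "--github-token", "/etc/hub-token/token",
            "--branch", "release-1.0"]
  auto:    ["./hack/release.sh", "--auto-release",
            "--release-gcs", "knative-releases/serving",
            "--release-gcr", "gcr.io/knative-releases",
            "--github-token", "/etc/hub-token/token"]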
-- cron: "14 8 * * *"
-  name: ci-knative-client-1.1-continuous
    [… release-1.1, dashboard knative-1.1 …]
-- cron: "49 8 * * *"
-  name: ci-knative-client-1.2-continuous
    [… release-1.2, dashboard knative-1.2 …]
-- cron: "56 8 * * *"
-  name: ci-knative-client-1.3-continuous
    [… release-1.3, dashboard knative-1.3 …]
-- cron: "59 9 * * *"
-  name: ci-knative-client-nightly-release
    [… nightly release on main: "./hack/release.sh --publish --tag-release" with the
    nightly-account secret; alerts serverless-engprod-sea@google.com after 1 failure …]
-- cron: "0 13 * * *"
-  name: ci-knative-client-tekton
    [… 120m periodic on main running "./test/tekton-tests.sh"; dashboard client, tab tekton …]
-- cron: "55 9 * * 2"
-  name: ci-knative-client-1.0-dot-release
    [… weekly dot-release from release-1.0: "./hack/release.sh --dot-release --release-gcs
    knative-releases/client --release-gcr gcr.io/knative-releases --github-token
    /etc/hub-token/token --branch release-1.0", mounting the hub-token and release-account
    secrets; dashboard knative-1.0, tab client-dot-release …]
-- cron: "48 9 * * 2"
-  name: ci-knative-client-1.1-dot-release
    [… release-1.1, dashboard knative-1.1 …]
-- cron: "21 9 * * 2"
-  name: ci-knative-client-1.2-dot-release
    [… release-1.2, dashboard knative-1.2 …]
-- cron: "50 9 * * 2"
-  name: ci-knative-client-1.3-dot-release
    [… release-1.3, dashboard knative-1.3 …]
-- cron: "29 */12 * * *"
-  name: ci-knative-client-auto-release
    [… twice-daily auto-release on main publishing to knative-releases/client and
    gcr.io/knative-releases with the hub-token and release-account secrets …]
-- cron: "0 14 * * *"
-  name: ci-knative-client-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: client
-    path_alias: knative.dev/client
-    base_ref: main
-  annotations:
-    testgrid-dashboards: client
-    testgrid-tab-name: s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: INGRESS_CLASS
-        value: "contour.ingress.networking.knative.dev"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: 
test-account -- cron: "10 18 * * *" - name: ci-knative-client-1.0-s390x-e2e-tests - agent: kubernetes - decorate: true - decoration_config: - timeout: 120m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client - path_alias: knative.dev/client - base_ref: release-1.0 - annotations: - testgrid-dashboards: knative-1.0 - testgrid-tab-name: client-s390x-e2e-tests - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "bash" - - "-c" - - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-10 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" - volumeMounts: - - name: s390x-cluster1 - mountPath: /opt/cluster - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DISABLE_MD_LINTING - value: "1" - - name: KO_FLAGS - value: "--platform=linux/s390x" - - name: PLATFORM - value: "linux/s390x" - - name: KUBECONFIG - value: "/root/.kube/config" - - name: DOCKER_CONFIG - value: "/opt/cluster" - - name: INGRESS_CLASS - value: "contour.ingress.networking.knative.dev" - - name: KO_DOCKER_REPO - valueFrom: - secretKeyRef: - name: s390x-cluster1 - key: ko-docker-repo - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.0 - volumes: - - name: s390x-cluster1 - secret: - secretName: s390x-cluster1 - defaultMode: 0600 - - name: test-account - secret: - secretName: test-account -- cron: "20 22 * * *" - name: ci-knative-client-1.1-s390x-e2e-tests - agent: kubernetes - decorate: true - decoration_config: - timeout: 120m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client - path_alias: knative.dev/client - base_ref: release-1.1 - annotations: - testgrid-dashboards: knative-1.1 - testgrid-tab-name: client-s390x-e2e-tests - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "bash" - - "-c" - - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-11 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" - volumeMounts: - - name: s390x-cluster1 - mountPath: /opt/cluster - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DISABLE_MD_LINTING - value: "1" - - name: KO_FLAGS - value: "--platform=linux/s390x" - - name: PLATFORM - value: "linux/s390x" - - name: KUBECONFIG - value: "/root/.kube/config" - - name: DOCKER_CONFIG - value: "/opt/cluster" - - name: INGRESS_CLASS - value: "contour.ingress.networking.knative.dev" - - name: KO_DOCKER_REPO - valueFrom: - secretKeyRef: - name: s390x-cluster1 - key: ko-docker-repo - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.1 - volumes: - - name: s390x-cluster1 - secret: - secretName: s390x-cluster1 - defaultMode: 0600 - - name: test-account - secret: - secretName: 
test-account -- cron: "30 2 * * *" - name: ci-knative-client-1.2-s390x-e2e-tests - agent: kubernetes - decorate: true - decoration_config: - timeout: 120m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client - path_alias: knative.dev/client - base_ref: release-1.2 - annotations: - testgrid-dashboards: knative-1.2 - testgrid-tab-name: client-s390x-e2e-tests - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "bash" - - "-c" - - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-12 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" - volumeMounts: - - name: s390x-cluster1 - mountPath: /opt/cluster - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DISABLE_MD_LINTING - value: "1" - - name: KO_FLAGS - value: "--platform=linux/s390x" - - name: PLATFORM - value: "linux/s390x" - - name: KUBECONFIG - value: "/root/.kube/config" - - name: DOCKER_CONFIG - value: "/opt/cluster" - - name: INGRESS_CLASS - value: "contour.ingress.networking.knative.dev" - - name: KO_DOCKER_REPO - valueFrom: - secretKeyRef: - name: s390x-cluster1 - key: ko-docker-repo - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.2 - volumes: - - name: s390x-cluster1 - secret: - secretName: s390x-cluster1 - defaultMode: 0600 - - name: test-account - secret: - secretName: test-account -- cron: "40 6 * * *" - name: ci-knative-client-1.3-s390x-e2e-tests - agent: kubernetes - decorate: true - decoration_config: - timeout: 120m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client - path_alias: knative.dev/client - base_ref: release-1.3 - annotations: - testgrid-dashboards: knative-1.3 - testgrid-tab-name: client-s390x-e2e-tests - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "bash" - - "-c" - - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-13 && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" - volumeMounts: - - name: s390x-cluster1 - mountPath: /opt/cluster - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DISABLE_MD_LINTING - value: "1" - - name: KO_FLAGS - value: "--platform=linux/s390x" - - name: PLATFORM - value: "linux/s390x" - - name: KUBECONFIG - value: "/root/.kube/config" - - name: DOCKER_CONFIG - value: "/opt/cluster" - - name: INGRESS_CLASS - value: "contour.ingress.networking.knative.dev" - - name: KO_DOCKER_REPO - valueFrom: - secretKeyRef: - name: s390x-cluster1 - key: ko-docker-repo - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.3 - volumes: - - name: s390x-cluster1 - secret: - secretName: s390x-cluster1 - defaultMode: 0600 - - name: test-account - secret: - secretName: 
test-account -- cron: "44 */12 * * *" - name: ci-knative-client-pkg-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client-pkg - path_alias: knative.dev/client-pkg - base_ref: main - annotations: - testgrid-dashboards: client-pkg - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "8 */12 * * *" - name: ci-knative-client-pkg-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client-pkg - path_alias: knative.dev/client-pkg - base_ref: main - annotations: - testgrid-dashboards: client-pkg - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/client-pkg" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "28 9 * * *" - name: ci-knative-client-pkg-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative - repo: client-pkg - path_alias: knative.dev/client-pkg - base_ref: main - annotations: - testgrid-dashboards: client-pkg - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "1 */12 * * *" - name: ci-knative-sandbox-kn-plugin-diag-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-diag - path_alias: knative.dev/kn-plugin-diag - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-diag - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - 
containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "20 */12 * * *" - name: ci-knative-sandbox-kn-plugin-event-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-event - path_alias: knative.dev/kn-plugin-event - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-event - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "24 */12 * * *" - name: ci-knative-sandbox-kn-plugin-event-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-event - path_alias: knative.dev/kn-plugin-event - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-event - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-event" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "0 9 * * *" - name: ci-knative-sandbox-kn-plugin-event-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-event - path_alias: knative.dev/kn-plugin-event - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-event - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: 
GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "52 9 * * 2" - name: ci-knative-sandbox-kn-plugin-event-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-event - path_alias: knative.dev/kn-plugin-event - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-event - testgrid-tab-name: dot-release - testgrid-alert-stale-results-hours: "170" - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-event" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "18 */12 * * *" - name: ci-knative-sandbox-kn-plugin-func-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-func - path_alias: knative.dev/kn-plugin-func - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-func - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-func" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "6 9 * * *" - name: ci-knative-sandbox-kn-plugin-func-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-func - path_alias: 
knative.dev/kn-plugin-func - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-func - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: nightly-account - secret: - secretName: nightly-account -- cron: "14 9 * * 2" - name: ci-knative-sandbox-kn-plugin-func-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-func - path_alias: knative.dev/kn-plugin-func - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-func - testgrid-tab-name: dot-release - testgrid-alert-stale-results-hours: "170" - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-func" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "28 */12 * * *" - name: ci-knative-sandbox-kn-plugin-migration-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-migration - path_alias: knative.dev/kn-plugin-migration - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-migration - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - 
volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "16 */12 * * *" - name: ci-knative-sandbox-kn-plugin-operator-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-operator - path_alias: knative.dev/kn-plugin-operator - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-operator - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "38 */12 * * *" - name: ci-knative-sandbox-kn-plugin-sample-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-sample - path_alias: knative.dev/kn-plugin-sample - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-sample - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "16 */12 * * *" - name: ci-knative-sandbox-kn-plugin-service-log-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-service-log - path_alias: knative.dev/kn-plugin-service-log - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-service-log - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "0 */12 * * *" - name: ci-knative-sandbox-kn-plugin-service-log-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-service-log - path_alias: knative.dev/kn-plugin-service-log - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-service-log - testgrid-tab-name: auto-release - 
testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-service-log" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "24 9 * * *" - name: ci-knative-sandbox-kn-plugin-service-log-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-service-log - path_alias: knative.dev/kn-plugin-service-log - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-service-log - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "36 9 * * 2" - name: ci-knative-sandbox-kn-plugin-service-log-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-service-log - path_alias: knative.dev/kn-plugin-service-log - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-service-log - testgrid-tab-name: dot-release - testgrid-alert-stale-results-hours: "170" - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-service-log" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "14 */12 * * *" - name: ci-knative-sandbox-kn-plugin-source-kafka-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - 
org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kafka - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "34 */12 * * *" - name: ci-knative-sandbox-kn-plugin-source-kafka-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kafka - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "54 9 * * *" - name: ci-knative-sandbox-kn-plugin-source-kafka-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kafka - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "46 9 * * 2" - name: ci-knative-sandbox-kn-plugin-source-kafka-1.0-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: release-1.0 - annotations: - testgrid-dashboards: knative-sandbox-1.0 - testgrid-tab-name: 
kn-plugin-source-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.0" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.0 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "9 9 * * 2" - name: ci-knative-sandbox-kn-plugin-source-kafka-1.1-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: release-1.1 - annotations: - testgrid-dashboards: knative-sandbox-1.1 - testgrid-tab-name: kn-plugin-source-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.1" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "20 9 * * 2" - name: ci-knative-sandbox-kn-plugin-source-kafka-1.2-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: release-1.2 - annotations: - testgrid-dashboards: knative-sandbox-1.2 - testgrid-tab-name: kn-plugin-source-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.2" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: 
/etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.2 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "31 9 * * 2" - name: ci-knative-sandbox-kn-plugin-source-kafka-1.3-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kafka - path_alias: knative.dev/kn-plugin-source-kafka - base_ref: release-1.3 - annotations: - testgrid-dashboards: knative-sandbox-1.3 - testgrid-tab-name: kn-plugin-source-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.3" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.3 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "25 */12 * * *" - name: ci-knative-sandbox-kn-plugin-source-kamelet-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kamelet - path_alias: knative.dev/kn-plugin-source-kamelet - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kamelet - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "17 */12 * * *" - name: ci-knative-sandbox-kn-plugin-source-kamelet-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kamelet - path_alias: knative.dev/kn-plugin-source-kamelet - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kamelet - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kamelet" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - 
mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "15 9 * * *" - name: ci-knative-sandbox-kn-plugin-source-kamelet-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kamelet - path_alias: knative.dev/kn-plugin-source-kamelet - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kamelet - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "47 9 * * 2" - name: ci-knative-sandbox-kn-plugin-source-kamelet-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-source-kamelet - path_alias: knative.dev/kn-plugin-source-kamelet - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-source-kamelet - testgrid-tab-name: dot-release - testgrid-alert-stale-results-hours: "170" - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-source-kamelet" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "33 */12 * * *" - name: ci-knative-sandbox-kn-plugin-admin-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-admin - path_alias: knative.dev/kn-plugin-admin - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-admin - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account 
- mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "53 */12 * * *" - name: ci-knative-sandbox-kn-plugin-admin-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-admin - path_alias: knative.dev/kn-plugin-admin - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-admin - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-admin" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "11 9 * * *" - name: ci-knative-sandbox-kn-plugin-admin-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-admin - path_alias: knative.dev/kn-plugin-admin - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-admin - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "59 9 * * 2" - name: ci-knative-sandbox-kn-plugin-admin-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-admin - path_alias: knative.dev/kn-plugin-admin - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-admin - testgrid-tab-name: dot-release - testgrid-alert-stale-results-hours: "170" - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-admin" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - 
name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "19 */12 * * *" - name: ci-knative-sandbox-kn-plugin-quickstart-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-quickstart - path_alias: knative.dev/kn-plugin-quickstart - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-quickstart - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "35 */12 * * *" - name: ci-knative-sandbox-kn-plugin-quickstart-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-quickstart - path_alias: knative.dev/kn-plugin-quickstart - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-quickstart - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/kn-plugin-quickstart" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "33 9 * * *" - name: ci-knative-sandbox-kn-plugin-quickstart-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-quickstart - path_alias: knative.dev/kn-plugin-quickstart - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-quickstart - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: 
/etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "5 9 * * 2" - name: ci-knative-sandbox-kn-plugin-quickstart-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: kn-plugin-quickstart - path_alias: knative.dev/kn-plugin-quickstart - base_ref: main - annotations: - testgrid-dashboards: kn-plugin-quickstart - testgrid-tab-name: dot-release - testgrid-alert-stale-results-hours: "170" - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/kn-plugin-quickstart" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "45 */12 * * *" - name: ci-knative-docs-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative - repo: docs - base_ref: main - annotations: - testgrid-dashboards: docs - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: test-account - secret: - secretName: test-account -- cron: "0 1 * * *" - name: ci-knative-docs-go-coverage - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: ci-knative-docs-go-coverage - agent: kubernetes - decorate: true - cluster: "build-knative" - extra_refs: - - org: knative - repo: docs - base_ref: main - annotations: - testgrid-dashboards: docs - testgrid-tab-name: docs-go-coverage - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - "runner.sh" - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" -- cron: "52 */12 * * *" - name: ci-knative-eventing-continuous - agent: kubernetes - decorate: true - decoration_config: 
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "5 8 * * *"
-  name: ci-knative-eventing-0.26-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-0.26
-  annotations:
-    testgrid-dashboards: knative-0.26
-    testgrid-tab-name: eventing-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.26
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "50 8 * * *"
-  name: ci-knative-eventing-1.0-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-1.0
-    testgrid-tab-name: eventing-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "7 8 * * *"
-  name: ci-knative-eventing-1.1-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-1.1
-    testgrid-tab-name: eventing-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "0 8 * * *"
-  name: ci-knative-eventing-1.2-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-1.2
-    testgrid-tab-name: eventing-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "45 8 * * *"
-  name: ci-knative-eventing-1.3-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-1.3
-    testgrid-tab-name: eventing-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "12 9 * * *"
-  name: ci-knative-eventing-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "9 9 * * 2"
-  name: ci-knative-eventing-0.26-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-0.26
-  annotations:
-    testgrid-dashboards: knative-0.26
-    testgrid-tab-name: eventing-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-0.26"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.26
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "28 9 * * 2"
-  name: ci-knative-eventing-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-1.0
-    testgrid-tab-name: eventing-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "55 9 * * 2"
-  name: ci-knative-eventing-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-1.1
-    testgrid-tab-name: eventing-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "58 9 * * 2"
-  name: ci-knative-eventing-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-1.2
-    testgrid-tab-name: eventing-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "1 9 * * 2"
-  name: ci-knative-eventing-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-1.3
-    testgrid-tab-name: eventing-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "52 */12 * * *"
-  name: ci-knative-eventing-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 7 * * *"
-  name: ci-knative-eventing-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing
-    testgrid-tab-name: s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: SYSTEM_NAMESPACE
-        value: "knative-eventing"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: SCALE_CHAOSDUCK_TO_ZERO
-        value: "1"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "10 11 * * *"
-  name: ci-knative-eventing-1.0-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-1.0
-    testgrid-tab-name: eventing-s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-10 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: SYSTEM_NAMESPACE
-        value: "knative-eventing"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: SCALE_CHAOSDUCK_TO_ZERO
-        value: "1"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "20 15 * * *"
-  name: ci-knative-eventing-1.1-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-1.1
-    testgrid-tab-name: eventing-s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-11 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: SYSTEM_NAMESPACE
-        value: "knative-eventing"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: SCALE_CHAOSDUCK_TO_ZERO
-        value: "1"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "30 19 * * *"
-  name: ci-knative-eventing-1.2-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-1.2
-    testgrid-tab-name: eventing-s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-12 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: SYSTEM_NAMESPACE
-        value: "knative-eventing"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: SCALE_CHAOSDUCK_TO_ZERO
-        value: "1"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "40 23 * * *"
-  name: ci-knative-eventing-1.3-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-1.3
-    testgrid-tab-name: eventing-s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-13 && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: SYSTEM_NAMESPACE
-        value: "knative-eventing"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: SCALE_CHAOSDUCK_TO_ZERO
-        value: "1"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "0 1 * * *"
-  name: ci-knative-eventing-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-knative-eventing-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: eventing
-    path_alias: knative.dev/eventing
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing
-    testgrid-tab-name: eventing-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "21 */12 * * *"
-  name: ci-knative-sandbox-eventing-awssqs-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-awssqs
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "51 9 * * *"
-  name: ci-knative-sandbox-eventing-awssqs-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-awssqs
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "21 */12 * * *"
-  name: ci-knative-sandbox-eventing-awssqs-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-awssqs
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-awssqs"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "32 9 * * 2"
-  name: ci-knative-sandbox-eventing-awssqs-0.26-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: release-0.26
-  annotations:
-    testgrid-dashboards: knative-sandbox-0.26
-    testgrid-tab-name: eventing-awssqs-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-awssqs"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-0.26"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.26
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "35 9 * * 2"
-  name: ci-knative-sandbox-eventing-awssqs-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-awssqs-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-awssqs"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "56 9 * * 2"
-  name: ci-knative-sandbox-eventing-awssqs-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-awssqs-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-awssqs"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "13 9 * * 2"
-  name: ci-knative-sandbox-eventing-awssqs-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-awssqs
-    path_alias: knative.dev/eventing-awssqs
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-awssqs-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-awssqs"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "27 */12 * * *"
-  name: ci-knative-sandbox-eventing-ceph-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-ceph
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "5 9 * * *"
-  name: ci-knative-sandbox-eventing-ceph-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-ceph
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "47 */12 * * *"
-  name: ci-knative-sandbox-eventing-ceph-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-ceph
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-ceph"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "29 9 * * 2"
-  name: ci-knative-sandbox-eventing-ceph-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-ceph-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-ceph"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "14 9 * * 2"
-  name: ci-knative-sandbox-eventing-ceph-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-ceph-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-ceph"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "3 9 * * 2"
-  name: ci-knative-sandbox-eventing-ceph-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-ceph-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-ceph"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "28 9 * * 2"
-  name: ci-knative-sandbox-eventing-ceph-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-ceph
-    path_alias: knative.dev/eventing-ceph
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-ceph-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-ceph"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "37 */12 * * *"
-  name: ci-knative-sandbox-eventing-couchdb-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-couchdb
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "11 9 * * *"
-  name: ci-knative-sandbox-eventing-couchdb-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-couchdb
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "33 */12 * * *"
-  name: ci-knative-sandbox-eventing-couchdb-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-couchdb
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-couchdb"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "9 9 * * 2"
-  name: ci-knative-sandbox-eventing-couchdb-0.25-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: release-0.25
-  annotations:
-    testgrid-dashboards: knative-sandbox-0.25
-    testgrid-tab-name: eventing-couchdb-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-couchdb"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-0.25"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.25
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "12 9 * * 2"
-  name: ci-knative-sandbox-eventing-couchdb-0.26-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: release-0.26
-  annotations:
-    testgrid-dashboards: knative-sandbox-0.26
-    testgrid-tab-name: eventing-couchdb-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-couchdb"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-0.26"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.26
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "51 9 * * 2"
-  name: ci-knative-sandbox-eventing-couchdb-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-couchdb-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-couchdb"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "8 9 * * 2"
-  name: ci-knative-sandbox-eventing-couchdb-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-couchdb
-    path_alias: knative.dev/eventing-couchdb
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-couchdb-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-couchdb"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "46 */12 * * *"
-  name: ci-knative-sandbox-eventing-github-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-github
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "22 9 * * *"
-  name: ci-knative-sandbox-eventing-github-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-github
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "42 */12 * * *"
-  name: ci-knative-sandbox-eventing-github-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-github
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-github"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "10 9 * * 2"
-  name: ci-knative-sandbox-eventing-github-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-github-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-github"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "13 9 * * 2"
-  name: ci-knative-sandbox-eventing-github-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-github-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-github"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "4 9 * * 2"
-  name: ci-knative-sandbox-eventing-github-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-github-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-github"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "11 9 * * 2"
-  name: ci-knative-sandbox-eventing-github-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-github
-    path_alias: knative.dev/eventing-github
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-github-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-github"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "38 */12 * * *"
-  name: ci-knative-sandbox-eventing-gitlab-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-gitlab
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "6 9 * * *"
-  name: ci-knative-sandbox-eventing-gitlab-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-gitlab
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "14 9 * * 2"
-  name: ci-knative-sandbox-eventing-gitlab-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-gitlab-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-gitlab"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "17 9 * * 2"
-  name: ci-knative-sandbox-eventing-gitlab-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-gitlab-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-gitlab"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "4 9 * * 2"
-  name: ci-knative-sandbox-eventing-gitlab-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-gitlab-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-gitlab"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "3 9 * * 2"
-  name: ci-knative-sandbox-eventing-gitlab-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-gitlab-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-gitlab"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "6 */12 * * *"
-  name: ci-knative-sandbox-eventing-gitlab-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-gitlab
-    path_alias: knative.dev/eventing-gitlab
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-gitlab
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-gitlab"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "7 */12 * * *"
-  name: ci-knative-sandbox-eventing-prometheus-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-prometheus
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "37 9 * * *"
-  name: ci-knative-sandbox-eventing-prometheus-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-prometheus
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "11 */12 * * *"
-  name: ci-knative-sandbox-eventing-prometheus-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-prometheus
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-prometheus"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "7 9 * * 2"
-  name: ci-knative-sandbox-eventing-prometheus-0.25-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: release-0.25
-  annotations:
-    testgrid-dashboards: knative-sandbox-0.25
-    testgrid-tab-name: eventing-prometheus-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-prometheus"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-0.25"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.25
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "2 9 * * 2"
-  name: ci-knative-sandbox-eventing-prometheus-0.26-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: release-0.26
-  annotations:
-    testgrid-dashboards: knative-sandbox-0.26
-    testgrid-tab-name: eventing-prometheus-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-prometheus"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-0.26"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-0.26
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "49 9 * * 2"
-  name: ci-knative-sandbox-eventing-prometheus-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-prometheus-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-prometheus"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "46 9 * * 2"
-  name: ci-knative-sandbox-eventing-prometheus-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-prometheus
-    path_alias: knative.dev/eventing-prometheus
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-prometheus-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-prometheus"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 */12 * * *"
-  name: ci-knative-sandbox-eventing-redis-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-redis
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "48 9 * * *"
-  name: ci-knative-sandbox-eventing-redis-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-redis
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "48 */12 * * *"
-  name: ci-knative-sandbox-eventing-redis-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-redis
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-redis"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "32 9 * * 2"
-  name: ci-knative-sandbox-eventing-redis-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-redis-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-redis"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "23 9 * * 2"
-  name: ci-knative-sandbox-eventing-redis-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-redis-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-redis"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "26 9 * * 2"
-  name: ci-knative-sandbox-eventing-redis-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-redis-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-redis"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "25 9 * * 2"
-  name: ci-knative-sandbox-eventing-redis-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-redis
-    path_alias: knative.dev/eventing-redis
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-redis-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-redis"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "16 */12 * * *"
-  name: ci-knative-sandbox-kperf-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: kperf
-    path_alias: knative.dev/kperf
-    base_ref: main
-  annotations:
-    testgrid-dashboards: kperf
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "2 */12 * * *"
-  name: ci-knative-pkg-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: pkg
-    path_alias: knative.dev/pkg
-    base_ref: main
-  annotations:
-    testgrid-dashboards: pkg
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "47 */12 * * *"
-  name: ci-knative-caching-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: caching
-    path_alias: knative.dev/caching
-    base_ref: main
-  annotations:
-    testgrid-dashboards: caching
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "5 */12 * * *"
-  name: ci-knative-sandbox-sample-controller-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: sample-controller
-    path_alias: knative.dev/sample-controller
-    base_ref: main
-  annotations:
-    testgrid-dashboards: sample-controller
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "55 9 * * *"
-  name: ci-knative-sandbox-sample-controller-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: sample-controller
-    path_alias: knative.dev/sample-controller
-    base_ref: main
-  annotations:
-    testgrid-dashboards: sample-controller
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
"1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "1 */12 * * *" - name: ci-knative-sandbox-sample-controller-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: sample-controller - path_alias: knative.dev/sample-controller - base_ref: main - annotations: - testgrid-dashboards: sample-controller - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/sample-controller" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "50 */12 * * *" - name: ci-knative-sandbox-sample-source-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: sample-source - path_alias: knative.dev/sample-source - base_ref: main - annotations: - testgrid-dashboards: sample-source - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: test-account - secret: - secretName: test-account -- cron: "10 9 * * *" - name: ci-knative-sandbox-sample-source-nightly-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: sample-source - path_alias: knative.dev/sample-source - base_ref: main - annotations: - testgrid-dashboards: sample-source - testgrid-tab-name: nightly-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: 
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "54 */12 * * *"
-  name: ci-knative-sandbox-sample-source-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: sample-source
-    path_alias: knative.dev/sample-source
-    base_ref: main
-  annotations:
-    testgrid-dashboards: sample-source
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/sample-source"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "49 */12 * * *"
-  name: ci-knative-test-infra-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: test-infra
-    path_alias: knative.dev/test-infra
-    base_ref: main
-  annotations:
-    testgrid-dashboards: test-infra
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "0 1 * * *"
-  name: ci-knative-test-infra-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-knative-test-infra-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: test-infra
-    path_alias: knative.dev/test-infra
-    base_ref: main
-  annotations:
-    testgrid-dashboards: test-infra
-    testgrid-tab-name: test-infra-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "34 9 * * *"
-  name: ci-google-knative-gcp-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: google
-    repo: knative-gcp
-    base_ref: main
-  annotations:
-    testgrid-dashboards: knative-gcp
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      - "--release-gcs"
-      - "knative-gcp-nightly"
-      - "--release-gcr"
-      - "gcr.io/knative-gcp-nightly"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "42 */12 * * *"
-  name: ci-google-knative-gcp-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: google
-    repo: knative-gcp
-    base_ref: main
-  annotations:
-    testgrid-dashboards: knative-gcp
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-gcp"
-      - "--release-gcr"
-      - "gcr.io/knative-gcp"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: google
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 1 * * *"
-  name: ci-google-knative-gcp-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-google-knative-gcp-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: google
-    repo: knative-gcp
-    base_ref: main
-  annotations:
-    testgrid-dashboards: knative-gcp
-    testgrid-tab-name: knative-gcp-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "59 */12 * * *"
-  name: ci-knative-sandbox-net-certmanager-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-certmanager
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "45 9 * * *"
-  name: ci-knative-sandbox-net-certmanager-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: net-certmanager
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-certmanager
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "1 9 * * 2"
-  name: ci-knative-sandbox-net-certmanager-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: net-certmanager-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-certmanager"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "46 9 * * 2"
-  name: ci-knative-sandbox-net-certmanager-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: net-certmanager-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-certmanager"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "27 9 * * 2"
-  name: ci-knative-sandbox-net-certmanager-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: net-certmanager-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-certmanager"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "36 9 * * 2"
-  name: ci-knative-sandbox-net-certmanager-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: net-certmanager-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-certmanager"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "27 */12 * * *"
-  name: ci-knative-sandbox-net-certmanager-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-certmanager
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/net-certmanager"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 1 * * *"
-  name: ci-knative-sandbox-net-certmanager-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-knative-sandbox-net-certmanager-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-certmanager
-    path_alias: knative.dev/net-certmanager
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-certmanager
-    testgrid-tab-name: net-certmanager-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "38 */12 * * *"
-  name: ci-knative-sandbox-net-contour-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-contour
-    path_alias: knative.dev/net-contour
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-contour
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "42 9 * * *"
-  name: ci-knative-sandbox-net-contour-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: net-contour
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-contour
-    path_alias: knative.dev/net-contour
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-contour
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
"serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - volumeMounts: - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: nightly-account - secret: - secretName: nightly-account -- cron: "2 9 * * 2" - name: ci-knative-sandbox-net-contour-1.0-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: net-contour - path_alias: knative.dev/net-contour - base_ref: release-1.0 - annotations: - testgrid-dashboards: knative-sandbox-1.0 - testgrid-tab-name: net-contour-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/net-contour" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.0" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.0 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "9 9 * * 2" - name: ci-knative-sandbox-net-contour-1.1-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: net-contour - path_alias: knative.dev/net-contour - base_ref: release-1.1 - annotations: - testgrid-dashboards: knative-sandbox-1.1 - testgrid-tab-name: net-contour-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/net-contour" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.1" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "44 9 * * 2" - name: ci-knative-sandbox-net-contour-1.2-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: net-contour - path_alias: knative.dev/net-contour 
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: net-contour-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-contour"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "19 9 * * 2"
-  name: ci-knative-sandbox-net-contour-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-contour
-    path_alias: knative.dev/net-contour
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: net-contour-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-contour"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "42 */12 * * *"
-  name: ci-knative-sandbox-net-contour-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-contour
-    path_alias: knative.dev/net-contour
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-contour
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/net-contour"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "29 */12 * * *"
-  name: ci-knative-sandbox-net-gateway-api-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-gateway-api
-    path_alias: knative.dev/net-gateway-api
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-gateway-api
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "3 9 * * *"
-  name: ci-knative-sandbox-net-gateway-api-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: net-gateway-api
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-gateway-api
-    path_alias: knative.dev/net-gateway-api
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-gateway-api
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "53 */12 * * *"
-  name: ci-knative-sandbox-net-gateway-api-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-gateway-api
-    path_alias: knative.dev/net-gateway-api
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-gateway-api
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/net-gateway-api"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "9 */12 * * *"
-  name: ci-knative-sandbox-net-http01-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-http01
-    path_alias: knative.dev/net-http01
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-http01
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "59 9 * * *"
-  name: ci-knative-sandbox-net-http01-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: net-http01
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-http01
-    path_alias: knative.dev/net-http01
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-http01
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "7 9 * * 2"
-  name: ci-knative-sandbox-net-http01-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-http01
-    path_alias: knative.dev/net-http01
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-http01
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-http01"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "5 */12 * * *"
-  name: ci-knative-sandbox-net-http01-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-http01
-    path_alias: knative.dev/net-http01
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-http01
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/net-http01"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "8 */12 * * *"
-  name: ci-knative-sandbox-net-istio-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-istio
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "8 9 * * *"
-  name: ci-knative-sandbox-net-istio-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: net-istio
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-istio
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "34 */9 * * *"
-  name: ci-knative-sandbox-net-istio-latest
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-istio
-    testgrid-tab-name: latest
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--run-test"
-      - "./test/e2e-tests.sh --istio-version latest"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "32 9 * * 2"
-  name: ci-knative-sandbox-net-istio-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: net-istio-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-istio"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "7 9 * * 2"
-  name: ci-knative-sandbox-net-istio-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: net-istio-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-istio"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "46 9 * * 2"
-  name: ci-knative-sandbox-net-istio-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: net-istio-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-istio"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "21 9 * * 2"
-  name: ci-knative-sandbox-net-istio-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: net-istio-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-istio"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "12 */12 * * *"
-  name: ci-knative-sandbox-net-istio-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-istio
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/net-istio"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 1 * * *"
-  name: ci-knative-sandbox-net-istio-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-knative-sandbox-net-istio-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-istio
-    path_alias: knative.dev/net-istio
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-istio
-    testgrid-tab-name: net-istio-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "17 */12 * * *"
-  name: ci-knative-sandbox-net-kourier-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-kourier
-    path_alias: knative.dev/net-kourier
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-kourier
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "3 9 * * *"
-  name: ci-knative-sandbox-net-kourier-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: net-kourier
-      report_template: "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-kourier
-    path_alias: knative.dev/net-kourier
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-kourier
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "27 9 * * 2"
-  name: ci-knative-sandbox-net-kourier-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-kourier
-    path_alias: knative.dev/net-kourier
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-kourier
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/net-kourier"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "53 */12 * * *"
-  name: ci-knative-sandbox-net-kourier-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-kourier
-    path_alias: knative.dev/net-kourier
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-kourier
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/net-kourier"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 1 * * *"
-  name: ci-knative-sandbox-net-kourier-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-knative-sandbox-net-kourier-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: net-kourier
-    path_alias: knative.dev/net-kourier
-    base_ref: main
-  annotations:
-    testgrid-dashboards: net-kourier
-    testgrid-tab-name: net-kourier-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "0 */12 * * *"
-  name: ci-knative-operator-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: main
-  annotations:
-    testgrid-dashboards: operator
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "22 8 * * *"
-  name: ci-knative-operator-1.0-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-1.0
-    testgrid-tab-name: operator-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "15 8 * * *"
-  name: ci-knative-operator-1.1-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-1.1
-    testgrid-tab-name: operator-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "28 8 * * *"
-  name: ci-knative-operator-1.2-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-1.2
-    testgrid-tab-name: operator-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "21 8 * * *"
-  name: ci-knative-operator-1.3-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-1.3
-    testgrid-tab-name: operator-continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--nopublish"
-      - "--notag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "48 9 * * *"
-  name: ci-knative-operator-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: main
-  annotations:
-    testgrid-dashboards: operator
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "28 9 * * 2"
-  name: ci-knative-operator-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-1.0
-    testgrid-tab-name: operator-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/operator"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "39 9 * * 2"
-  name: ci-knative-operator-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-1.1
-    testgrid-tab-name: operator-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/operator"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "46 9 * * 2"
-  name: ci-knative-operator-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-1.2
-    testgrid-tab-name: operator-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/operator"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "25 9 * * 2"
-  name: ci-knative-operator-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-1.3
-    testgrid-tab-name: operator-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/operator"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-      resources:
-        requests:
-          memory: 12Gi
-        limits:
-          memory: 16Gi
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "32 */12 * * *"
-  name: ci-knative-operator-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: main
-  annotations:
-    testgrid-dashboards: operator
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/operator"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 16 * * *"
-  name: ci-knative-operator-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: main
-  annotations:
-    testgrid-dashboards: operator
-    testgrid-tab-name: s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-main && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
--run-tests" - volumeMounts: - - name: s390x-cluster1 - mountPath: /opt/cluster - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DISABLE_MD_LINTING - value: "1" - - name: KO_FLAGS - value: "--platform=linux/s390x" - - name: PLATFORM - value: "linux/s390x" - - name: KUBECONFIG - value: "/root/.kube/config" - - name: DOCKER_CONFIG - value: "/opt/cluster" - - name: INGRESS_CLASS - value: "contour.ingress.networking.knative.dev" - - name: KO_DOCKER_REPO - valueFrom: - secretKeyRef: - name: s390x-cluster1 - key: ko-docker-repo - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: s390x-cluster1 - secret: - secretName: s390x-cluster1 - defaultMode: 0600 - - name: test-account - secret: - secretName: test-account -- cron: "10 20 * * *" - name: ci-knative-operator-1.0-s390x-e2e-tests - agent: kubernetes - decorate: true - decoration_config: - timeout: 120m - cluster: "build-knative" - extra_refs: - - org: knative - repo: operator - path_alias: knative.dev/operator - base_ref: release-1.0 - annotations: - testgrid-dashboards: knative-1.0 - testgrid-tab-name: operator-s390x-e2e-tests - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "bash" - - "-c" - - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-10 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" - volumeMounts: - - name: s390x-cluster1 - mountPath: /opt/cluster - readOnly: true - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: DISABLE_MD_LINTING - value: "1" - - name: KO_FLAGS - value: "--platform=linux/s390x" - - name: PLATFORM - value: "linux/s390x" - - name: KUBECONFIG - value: "/root/.kube/config" - - name: DOCKER_CONFIG - value: "/opt/cluster" - - name: INGRESS_CLASS - value: "contour.ingress.networking.knative.dev" - - name: KO_DOCKER_REPO - valueFrom: - secretKeyRef: - name: s390x-cluster1 - key: ko-docker-repo - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.0 - volumes: - - name: s390x-cluster1 - secret: - secretName: s390x-cluster1 - defaultMode: 0600 - - name: test-account - secret: - secretName: test-account -- cron: "20 0 * * *" - name: ci-knative-operator-1.1-s390x-e2e-tests - agent: kubernetes - decorate: true - decoration_config: - timeout: 120m - cluster: "build-knative" - extra_refs: - - org: knative - repo: operator - path_alias: knative.dev/operator - base_ref: release-1.1 - annotations: - testgrid-dashboards: knative-1.1 - testgrid-tab-name: operator-s390x-e2e-tests - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "bash" - - "-c" - - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-11 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" - volumeMounts: - 
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: INGRESS_CLASS
-        value: "contour.ingress.networking.knative.dev"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "30 4 * * *"
-  name: ci-knative-operator-1.2-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-1.2
-    testgrid-tab-name: operator-s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-12 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: INGRESS_CLASS
-        value: "contour.ingress.networking.knative.dev"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "40 8 * * *"
-  name: ci-knative-operator-1.3-s390x-e2e-tests
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 120m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative
-    repo: operator
-    path_alias: knative.dev/operator
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-1.3
-    testgrid-tab-name: operator-s390x-e2e-tests
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "bash"
-      - "-c"
-      - "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh operator-13 && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
-      volumeMounts:
-      - name: s390x-cluster1
-        mountPath: /opt/cluster
-        readOnly: true
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DISABLE_MD_LINTING
-        value: "1"
-      - name: KO_FLAGS
-        value: "--platform=linux/s390x"
-      - name: PLATFORM
-        value: "linux/s390x"
-      - name: KUBECONFIG
-        value: "/root/.kube/config"
-      - name: DOCKER_CONFIG
-        value: "/opt/cluster"
-      - name: INGRESS_CLASS
-        value: "contour.ingress.networking.knative.dev"
-      - name: KO_DOCKER_REPO
-        valueFrom:
-          secretKeyRef:
-            name: s390x-cluster1
-            key: ko-docker-repo
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: s390x-cluster1
-      secret:
-        secretName: s390x-cluster1
-        defaultMode: 0600
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "36 */12 * * *"
-  name: ci-knative-sandbox-async-component-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: async-component
-    path_alias: knative.dev/async-component
-    base_ref: main
-  annotations:
-    testgrid-dashboards: async-component
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "0 9 * * *"
-  name: ci-knative-sandbox-async-component-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: async-component
-    path_alias: knative.dev/async-component
-    base_ref: main
-  annotations:
-    testgrid-dashboards: async-component
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "8 9 * * 2"
-  name: ci-knative-sandbox-async-component-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: async-component
-    path_alias: knative.dev/async-component
-    base_ref: main
-  annotations:
-    testgrid-dashboards: async-component
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
"--dot-release" - - "--release-gcs" - - "knative-releases/async-component" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "24 */12 * * *" - name: ci-knative-sandbox-async-component-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: async-component - path_alias: knative.dev/async-component - base_ref: main - annotations: - testgrid-dashboards: async-component - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/async-component" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: release-account - secret: - secretName: release-account -- cron: "0 1 * * *" - name: ci-knative-sandbox-async-component-go-coverage - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: ci-knative-sandbox-async-component-go-coverage - agent: kubernetes - decorate: true - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: async-component - path_alias: knative.dev/async-component - base_ref: main - annotations: - testgrid-dashboards: async-component - testgrid-tab-name: async-component-go-coverage - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - "runner.sh" - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" -- cron: "32 */12 * * *" - name: ci-knative-sandbox-discovery-continuous - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: discovery - path_alias: knative.dev/discovery - base_ref: main - annotations: - testgrid-dashboards: discovery - testgrid-tab-name: continuous - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./test/presubmit-tests.sh" - - "--all-tests" - volumeMounts: - - name: test-account - mountPath: /etc/test-account - readOnly: true - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/test-account/service-account.json - - name: E2E_CLUSTER_REGION - 
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "24 9 * * *"
-  name: ci-knative-sandbox-discovery-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing-sources
-      report_template: "The nightly release job for discovery failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: discovery
-    path_alias: knative.dev/discovery
-    base_ref: main
-  annotations:
-    testgrid-dashboards: discovery
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "32 9 * * 2"
-  name: ci-knative-sandbox-discovery-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: discovery
-    path_alias: knative.dev/discovery
-    base_ref: main
-  annotations:
-    testgrid-dashboards: discovery
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/discovery"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "28 */12 * * *"
-  name: ci-knative-sandbox-discovery-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: discovery
-    path_alias: knative.dev/discovery
-    base_ref: main
-  annotations:
-    testgrid-dashboards: discovery
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/discovery"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "23 */12 * * *"
-  name: ci-knative-sandbox-eventing-camel-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-camel
-    path_alias: knative.dev/eventing-camel
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-camel
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "17 9 * * *"
-  name: ci-knative-sandbox-eventing-camel-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing-sources
-      report_template: "The nightly release job for camel failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-camel
-    path_alias: knative.dev/eventing-camel
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-camel
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "25 9 * * 2"
-  name: ci-knative-sandbox-eventing-camel-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-camel
-    path_alias: knative.dev/eventing-camel
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-camel
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-camel"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "55 */12 * * *"
-  name: ci-knative-sandbox-eventing-camel-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-camel
-    path_alias: knative.dev/eventing-camel
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-camel
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-camel"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "31 */12 * * *"
-  name: ci-knative-sandbox-eventing-kafka-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka
-    path_alias: knative.dev/eventing-kafka
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kafka
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "37 9 * * *"
-  name: ci-knative-sandbox-eventing-kafka-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing-kafka
-      report_template: "The nightly release job for eventing-kafka failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka
-    path_alias: knative.dev/eventing-kafka
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kafka
-    testgrid-tab-name: nightly-release
"serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--publish" - - "--tag-release" - securityContext: - privileged: true - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: nightly-account - mountPath: /etc/nightly-account - readOnly: true - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/nightly-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - volumes: - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: nightly-account - secret: - secretName: nightly-account -- cron: "53 9 * * 2" - name: ci-knative-sandbox-eventing-kafka-1.0-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: eventing-kafka - path_alias: knative.dev/eventing-kafka - base_ref: release-1.0 - annotations: - testgrid-dashboards: knative-sandbox-1.0 - testgrid-tab-name: eventing-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/eventing-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.0" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.0 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "50 9 * * 2" - name: ci-knative-sandbox-eventing-kafka-1.1-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: eventing-kafka - path_alias: knative.dev/eventing-kafka - base_ref: release-1.1 - annotations: - testgrid-dashboards: knative-sandbox-1.1 - testgrid-tab-name: eventing-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/eventing-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - 
"/etc/hub-token/token" - - "--branch" - - "release-1.1" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.1 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "3 9 * * 2" - name: ci-knative-sandbox-eventing-kafka-1.2-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: eventing-kafka - path_alias: knative.dev/eventing-kafka - base_ref: release-1.2 - annotations: - testgrid-dashboards: knative-sandbox-1.2 - testgrid-tab-name: eventing-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/eventing-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - "--branch" - - "release-1.2" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.2 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "36 9 * * 2" - name: ci-knative-sandbox-eventing-kafka-1.3-dot-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: eventing-kafka - path_alias: knative.dev/eventing-kafka - base_ref: release-1.3 - annotations: - testgrid-dashboards: knative-sandbox-1.3 - testgrid-tab-name: eventing-kafka-dot-release - testgrid-alert-stale-results-hours: "3" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--dot-release" - - "--release-gcs" - - "knative-releases/eventing-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - - 
"--branch" - - "release-1.3" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - - name: PULL_BASE_REF - value: release-1.3 - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "31 */12 * * *" - name: ci-knative-sandbox-eventing-kafka-auto-release - agent: kubernetes - decorate: true - decoration_config: - timeout: 180m - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: eventing-kafka - path_alias: knative.dev/eventing-kafka - base_ref: main - annotations: - testgrid-dashboards: eventing-kafka - testgrid-tab-name: auto-release - testgrid-alert-email: "serverless-engprod-sea@google.com" - testgrid-num-failures-to-alert: "1" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "./hack/release.sh" - - "--auto-release" - - "--release-gcs" - - "knative-releases/eventing-kafka" - - "--release-gcr" - - "gcr.io/knative-releases" - - "--github-token" - - "/etc/hub-token/token" - securityContext: - privileged: true - volumeMounts: - - name: hub-token - mountPath: /etc/hub-token - readOnly: true - - name: docker-graph - mountPath: /docker-graph - - name: modules - mountPath: /lib/modules - - name: cgroup - mountPath: /sys/fs/cgroup - - name: release-account - mountPath: /etc/release-account - readOnly: true - env: - - name: ORG_NAME - value: knative-sandbox - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/release-account/service-account.json - - name: E2E_CLUSTER_REGION - value: us-central1 - resources: - requests: - memory: 12Gi - limits: - memory: 16Gi - volumes: - - name: hub-token - secret: - secretName: hub-token - - name: docker-graph - emptyDir: {} - - name: modules - hostPath: - path: /lib/modules - type: Directory - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: release-account - secret: - secretName: release-account -- cron: "0 1 * * *" - name: ci-knative-sandbox-eventing-kafka-go-coverage - labels: - prow.k8s.io/pubsub.project: knative-tests - prow.k8s.io/pubsub.topic: knative-monitoring - prow.k8s.io/pubsub.runID: ci-knative-sandbox-eventing-kafka-go-coverage - agent: kubernetes - decorate: true - cluster: "build-knative" - extra_refs: - - org: knative-sandbox - repo: eventing-kafka - path_alias: knative.dev/eventing-kafka - base_ref: main - annotations: - testgrid-dashboards: eventing-kafka - testgrid-tab-name: eventing-kafka-go-coverage - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - "runner.sh" - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=50" -- cron: "59 */12 * * *" - name: 
-  name: ci-knative-sandbox-eventing-kafka-broker-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kafka-broker
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "49 9 * * *"
-  name: ci-knative-sandbox-eventing-kafka-broker-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing-kafka
-      report_template: "The nightly release job for eventing-kafka-broker failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kafka-broker
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "29 9 * * 2"
-  name: ci-knative-sandbox-eventing-kafka-broker-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-kafka-broker-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kafka-broker"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "26 9 * * 2"
-  name: ci-knative-sandbox-eventing-kafka-broker-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-kafka-broker-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kafka-broker"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "19 9 * * 2"
-  name: ci-knative-sandbox-eventing-kafka-broker-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-kafka-broker-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kafka-broker"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "32 9 * * 2"
-  name: ci-knative-sandbox-eventing-kafka-broker-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-kafka-broker-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kafka-broker"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "11 */12 * * *"
-  name: ci-knative-sandbox-eventing-kafka-broker-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kafka-broker
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kafka-broker"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      securityContext:
-        privileged: true
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: docker-graph
-        mountPath: /docker-graph
-      - name: modules
-        mountPath: /lib/modules
-      - name: cgroup
-        mountPath: /sys/fs/cgroup
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: DOCKER_IN_DOCKER_ENABLED
-        value: "true"
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: docker-graph
-      emptyDir: {}
-    - name: modules
-      hostPath:
-        path: /lib/modules
-        type: Directory
-    - name: cgroup
-      hostPath:
-        path: /sys/fs/cgroup
-        type: Directory
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "0 1 * * *"
-  name: ci-knative-sandbox-eventing-kafka-broker-go-coverage
-  labels:
-    prow.k8s.io/pubsub.project: knative-tests
-    prow.k8s.io/pubsub.topic: knative-monitoring
-    prow.k8s.io/pubsub.runID: ci-knative-sandbox-eventing-kafka-broker-go-coverage
-  agent: kubernetes
-  decorate: true
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kafka-broker
-    path_alias: knative.dev/eventing-kafka-broker
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kafka-broker
-    testgrid-tab-name: eventing-kafka-broker-go-coverage
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - "runner.sh"
-      args:
-      - "coverage"
-      - "--artifacts=$(ARTIFACTS)"
-      - "--cov-threshold-percentage=50"
-- cron: "35 9 * * *"
-  name: ci-knative-sandbox-eventing-rabbitmq-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing-rabbitmq
-      report_template: "The nightly release job for eventing-rabbitmq failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-rabbitmq
-    path_alias: knative.dev/eventing-rabbitmq
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-rabbitmq
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "9 */12 * * *"
-  name: ci-knative-sandbox-eventing-rabbitmq-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-rabbitmq
-    path_alias: knative.dev/eventing-rabbitmq
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-rabbitmq
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-rabbitmq"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "43 9 * * 2"
-  name: ci-knative-sandbox-eventing-rabbitmq-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-rabbitmq
-    path_alias: knative.dev/eventing-rabbitmq
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-rabbitmq-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-rabbitmq"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "16 9 * * 2"
-  name: ci-knative-sandbox-eventing-rabbitmq-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-rabbitmq
-    path_alias: knative.dev/eventing-rabbitmq
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-rabbitmq-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-rabbitmq"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "37 9 * * 2"
-  name: ci-knative-sandbox-eventing-rabbitmq-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-rabbitmq
-    path_alias: knative.dev/eventing-rabbitmq
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-rabbitmq-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-rabbitmq"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "34 9 * * 2"
-  name: ci-knative-sandbox-eventing-rabbitmq-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-rabbitmq
-    path_alias: knative.dev/eventing-rabbitmq
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-rabbitmq-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-rabbitmq"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "54 9 * * *"
-  name: ci-knative-sandbox-eventing-natss-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing
-      report_template: "The nightly release job for eventing-natss failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-natss
-    path_alias: knative.dev/eventing-natss
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-natss
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "54 9 * * 2"
-  name: ci-knative-sandbox-eventing-natss-1.0-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-natss
-    path_alias: knative.dev/eventing-natss
-    base_ref: release-1.0
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.0
-    testgrid-tab-name: eventing-natss-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-natss"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.0"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.0
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "33 9 * * 2"
-  name: ci-knative-sandbox-eventing-natss-1.1-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-natss
-    path_alias: knative.dev/eventing-natss
-    base_ref: release-1.1
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.1
-    testgrid-tab-name: eventing-natss-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-natss"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.1"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "40 9 * * 2"
-  name: ci-knative-sandbox-eventing-natss-1.2-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-natss
-    path_alias: knative.dev/eventing-natss
-    base_ref: release-1.2
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.2
-    testgrid-tab-name: eventing-natss-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-natss"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.2"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.2
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "27 9 * * 2"
-  name: ci-knative-sandbox-eventing-natss-1.3-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-natss
-    path_alias: knative.dev/eventing-natss
-    base_ref: release-1.3
-  annotations:
-    testgrid-dashboards: knative-sandbox-1.3
-    testgrid-tab-name: eventing-natss-dot-release
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-natss"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      - "--branch"
-      - "release-1.3"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-      - name: PULL_BASE_REF
-        value: release-1.3
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "2 */12 * * *"
-  name: ci-knative-sandbox-eventing-natss-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-natss
-    path_alias: knative.dev/eventing-natss
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-natss
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-natss"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "14 */12 * * *"
-  name: ci-knative-sandbox-eventing-autoscaler-keda-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-autoscaler-keda
-    path_alias: knative.dev/eventing-autoscaler-keda
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-autoscaler-keda
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "10 9 * * *"
-  name: ci-knative-sandbox-eventing-autoscaler-keda-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-autoscaler-keda
-    path_alias: knative.dev/eventing-autoscaler-keda
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-autoscaler-keda
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "26 9 * * 2"
-  name: ci-knative-sandbox-eventing-autoscaler-keda-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-autoscaler-keda
-    path_alias: knative.dev/eventing-autoscaler-keda
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-autoscaler-keda
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-autoscaler-keda"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "50 */12 * * *"
-  name: ci-knative-sandbox-eventing-autoscaler-keda-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-autoscaler-keda
-    path_alias: knative.dev/eventing-autoscaler-keda
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-autoscaler-keda
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-autoscaler-keda"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "2 */12 * * *"
-  name: ci-knative-sandbox-eventing-kogito-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kogito
-    path_alias: knative.dev/eventing-kogito
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kogito
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "38 9 * * *"
-  name: ci-knative-sandbox-eventing-kogito-nightly-release
-  agent: kubernetes
-  decorate: true
-  reporter_config:
-    slack:
-      channel: eventing-sources
-      report_template: "The nightly release job for Kogito failed, check the log: <{{.Status.URL}}|View logs>"
-      job_states_to_report:
-      - "failure"
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kogito
-    path_alias: knative.dev/eventing-kogito
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kogito
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "30 9 * * 2"
-  name: ci-knative-sandbox-eventing-kogito-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kogito
-    path_alias: knative.dev/eventing-kogito
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kogito
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kogito"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "34 */12 * * *"
-  name: ci-knative-sandbox-eventing-kogito-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: eventing-kogito
-    path_alias: knative.dev/eventing-kogito
-    base_ref: main
-  annotations:
-    testgrid-dashboards: eventing-kogito
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/eventing-kogito"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "1 */12 * * *"
-  name: ci-knative-sandbox-container-freezer-continuous
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: container-freezer
-    path_alias: knative.dev/container-freezer
-    base_ref: main
-  annotations:
-    testgrid-dashboards: container-freezer
-    testgrid-tab-name: continuous
-    testgrid-alert-stale-results-hours: "3"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./test/presubmit-tests.sh"
-      - "--all-tests"
-      volumeMounts:
-      - name: test-account
-        mountPath: /etc/test-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/test-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: test-account
-      secret:
-        secretName: test-account
-- cron: "7 9 * * *"
-  name: ci-knative-sandbox-container-freezer-nightly-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: container-freezer
-    path_alias: knative.dev/container-freezer
-    base_ref: main
-  annotations:
-    testgrid-dashboards: container-freezer
-    testgrid-tab-name: nightly-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--publish"
-      - "--tag-release"
-      volumeMounts:
-      - name: nightly-account
-        mountPath: /etc/nightly-account
-        readOnly: true
-      env:
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/nightly-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: nightly-account
-      secret:
-        secretName: nightly-account
-- cron: "55 9 * * 2"
-  name: ci-knative-sandbox-container-freezer-dot-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: container-freezer
-    path_alias: knative.dev/container-freezer
-    base_ref: main
-  annotations:
-    testgrid-dashboards: container-freezer
-    testgrid-tab-name: dot-release
-    testgrid-alert-stale-results-hours: "170"
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--dot-release"
-      - "--release-gcs"
-      - "knative-releases/container-freezer"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-- cron: "17 */12 * * *"
-  name: ci-knative-sandbox-container-freezer-auto-release
-  agent: kubernetes
-  decorate: true
-  decoration_config:
-    timeout: 180m
-  cluster: "build-knative"
-  extra_refs:
-  - org: knative-sandbox
-    repo: container-freezer
-    path_alias: knative.dev/container-freezer
-    base_ref: main
-  annotations:
-    testgrid-dashboards: container-freezer
-    testgrid-tab-name: auto-release
-    testgrid-alert-email: "serverless-engprod-sea@google.com"
-    testgrid-num-failures-to-alert: "1"
-  spec:
-    containers:
-    - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-      imagePullPolicy: Always
-      command:
-      - runner.sh
-      args:
-      - "./hack/release.sh"
-      - "--auto-release"
-      - "--release-gcs"
-      - "knative-releases/container-freezer"
-      - "--release-gcr"
-      - "gcr.io/knative-releases"
-      - "--github-token"
-      - "/etc/hub-token/token"
-      volumeMounts:
-      - name: hub-token
-        mountPath: /etc/hub-token
-        readOnly: true
-      - name: release-account
-        mountPath: /etc/release-account
-        readOnly: true
-      env:
-      - name: ORG_NAME
-        value: knative-sandbox
-      - name: GOOGLE_APPLICATION_CREDENTIALS
-        value: /etc/release-account/service-account.json
-      - name: E2E_CLUSTER_REGION
-        value: us-central1
-    volumes:
-    - name: hub-token
-      secret:
-        secretName: hub-token
-    - name: release-account
-      secret:
-        secretName: release-account
-postsubmits:
-  knative/eventing:
-  - name: post-knative-eventing-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    path_alias: knative.dev/eventing
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "coverage"
-        - "--artifacts=$(ARTIFACTS)"
-        - "--cov-threshold-percentage=0"
-  knative/docs:
-  - name: post-knative-docs-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "coverage"
-        - "--artifacts=$(ARTIFACTS)"
-        - "--cov-threshold-percentage=0"
-  knative/test-infra:
-  - name: post-knative-test-infra-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    path_alias: knative.dev/test-infra
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "coverage"
-        - "--artifacts=$(ARTIFACTS)"
-        - "--cov-threshold-percentage=0"
-  google/knative-gcp:
-  - name: post-google-knative-gcp-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "coverage"
-        - "--artifacts=$(ARTIFACTS)"
-        - "--cov-threshold-percentage=0"
-  knative-sandbox/net-certmanager:
-  - name: post-knative-sandbox-net-certmanager-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    path_alias: knative.dev/net-certmanager
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "coverage"
-        - "--artifacts=$(ARTIFACTS)"
-        - "--cov-threshold-percentage=0"
-  knative-sandbox/net-istio:
-  - name: post-knative-sandbox-net-istio-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    path_alias: knative.dev/net-istio
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
-        - "coverage"
-        - "--artifacts=$(ARTIFACTS)"
-        - "--cov-threshold-percentage=0"
-  knative-sandbox/net-kourier:
-  - name: post-knative-sandbox-net-kourier-go-coverage
-    branches:
-    - "main"
-    annotations:
-      testgrid-create-test-group: "false"
-    agent: kubernetes
-    decorate: true
-    cluster: "build-knative"
-    path_alias: knative.dev/net-kourier
-    spec:
-      containers:
-      - image: gcr.io/knative-tests/test-infra/prow-tests:stable
-        imagePullPolicy: Always
-        command:
-        - runner.sh
-        args:
- "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=0" - knative-sandbox/async-component: - - name: post-knative-sandbox-async-component-go-coverage - branches: - - "main" - annotations: - testgrid-create-test-group: "false" - agent: kubernetes - decorate: true - cluster: "build-knative" - path_alias: knative.dev/async-component - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=0" - knative-sandbox/eventing-kafka: - - name: post-knative-sandbox-eventing-kafka-go-coverage - branches: - - "main" - annotations: - testgrid-create-test-group: "false" - agent: kubernetes - decorate: true - cluster: "build-knative" - path_alias: knative.dev/eventing-kafka - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=0" - knative-sandbox/eventing-kafka-broker: - - name: post-knative-sandbox-eventing-kafka-broker-go-coverage - branches: - - "main" - annotations: - testgrid-create-test-group: "false" - agent: kubernetes - decorate: true - cluster: "build-knative" - path_alias: knative.dev/eventing-kafka-broker - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:stable - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=0" diff --git a/prow/jobs/generated/knative-sandbox/async-component-main.gen.yaml b/prow/jobs/generated/knative-sandbox/async-component-main.gen.yaml new file mode 100644 index 00000000000..11546aa12b5 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/async-component-main.gen.yaml @@ -0,0 +1,200 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: async-component + testgrid-tab-name: continuous + cluster: build-knative + cron: 17 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: async-component + name: continuous_async-component_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: async-component + testgrid-tab-name: nightly + cluster: build-knative + cron: 3 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: async-component + name: nightly_async-component_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: async-component + testgrid-tab-name: release + cluster: build-knative + cron: 15 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: async-component + name: release_async-component_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/async-component + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/async-component: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_async-component_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_async-component_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + 
privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_async-component_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/container-freezer-main.gen.yaml b/prow/jobs/generated/knative-sandbox/container-freezer-main.gen.yaml new file mode 100644 index 00000000000..bbf7968e4be --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/container-freezer-main.gen.yaml @@ -0,0 +1,200 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: container-freezer + testgrid-tab-name: continuous + cluster: build-knative + cron: 6 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: container-freezer + name: continuous_container-freezer_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: container-freezer + testgrid-tab-name: nightly + cluster: build-knative + cron: 2 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: container-freezer + name: nightly_container-freezer_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: container-freezer + testgrid-tab-name: release + cluster: build-knative + cron: 14 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: container-freezer + name: release_container-freezer_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/container-freezer + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: 
GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/container-freezer: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_container-freezer_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_container-freezer_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_container-freezer_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/discovery-main.gen.yaml b/prow/jobs/generated/knative-sandbox/discovery-main.gen.yaml new file mode 100644 index 00000000000..4b0a110fc62 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/discovery-main.gen.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
####
+# ####                                                               ####
+# #######################################################################
+
+presubmits:
+  knative-sandbox/discovery:
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_discovery_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_discovery_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_discovery_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-main.gen.yaml
new file mode 100644
index 00000000000..91be057ccc5
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-main.gen.yaml
@@ -0,0 +1,198 @@
+# #######################################################################
+# ####                                                               ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.            ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.
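Each generated periodic is surfaced on TestGrid solely through its two annotations; the dashboards file in config/prow/k8s-testgrid/k8s-testgrid.yaml warns that an annotation referencing a dashboard that does not exist there is invalid. A minimal illustrative fragment (example-repo is a placeholder, not a real job):

periodics:
- name: continuous_example-repo_main_periodic  # placeholder name
  cron: 17 */9 * * *
  annotations:
    testgrid-dashboards: example-repo  # must match a dashboard in k8s-testgrid.yaml
    testgrid-tab-name: continuous      # tab created under that dashboard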
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing-autoscaler-keda + testgrid-tab-name: continuous + cluster: build-knative + cron: 49 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-autoscaler-keda + name: continuous_eventing-autoscaler-keda_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: eventing-autoscaler-keda + testgrid-tab-name: nightly + cluster: build-knative + cron: 55 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-autoscaler-keda + name: nightly_eventing-autoscaler-keda_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: eventing-autoscaler-keda + testgrid-tab-name: release + cluster: build-knative + cron: 51 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-autoscaler-keda + name: release_eventing-autoscaler-keda_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-autoscaler-keda: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-test-kafka-source_eventing-autoscaler-keda_main + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: 
/etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-test-kafka-mt-source_eventing-autoscaler-keda_main + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.0.gen.yaml new file mode 100644 index 00000000000..286fb6abb5d --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.0.gen.yaml @@ -0,0 +1,164 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-autoscaler-keda-continuous + cluster: build-knative + cron: 31 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: continuous_eventing-autoscaler-keda_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-autoscaler-keda-release + cluster: build-knative + cron: 1 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: release_eventing-autoscaler-keda_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: 
hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-autoscaler-keda: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-test-kafka-source_eventing-autoscaler-keda_release-1.0 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-test-kafka-mt-source_eventing-autoscaler-keda_release-1.0 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.1.gen.yaml new file mode 100644 index 00000000000..489f7587741 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.1.gen.yaml @@ -0,0 +1,164 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-autoscaler-keda-continuous + cluster: build-knative + cron: 4 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: continuous_eventing-autoscaler-keda_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-autoscaler-keda-release + cluster: build-knative + cron: 44 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: release_eventing-autoscaler-keda_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-autoscaler-keda: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-kafka-source_eventing-autoscaler-keda_release-1.1 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-kafka-mt-source_eventing-autoscaler-keda_release-1.1 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} 
+ securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.2.gen.yaml new file mode 100644 index 00000000000..7a6eb388be9 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.2.gen.yaml @@ -0,0 +1,164 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-autoscaler-keda-continuous + cluster: build-knative + cron: 33 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: continuous_eventing-autoscaler-keda_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-autoscaler-keda-release + cluster: build-knative + cron: 31 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: release_eventing-autoscaler-keda_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-autoscaler-keda: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-kafka-source_eventing-autoscaler-keda_release-1.2 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + 
resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-kafka-mt-source_eventing-autoscaler-keda_release-1.2 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.3.gen.yaml new file mode 100644 index 00000000000..8b145d7389b --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-autoscaler-keda-release-1.3.gen.yaml @@ -0,0 +1,164 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-autoscaler-keda-continuous + cluster: build-knative + cron: 18 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: continuous_eventing-autoscaler-keda_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-autoscaler-keda-release + cluster: build-knative + cron: 6 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-autoscaler-keda + name: release_eventing-autoscaler-keda_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account 
+ name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-autoscaler-keda: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-kafka-source_eventing-autoscaler-keda_release-1.3 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-kafka-mt-source_eventing-autoscaler-keda_release-1.3 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-awssqs-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-awssqs-main.gen.yaml new file mode 100644 index 00000000000..6e39aaf0a58 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-awssqs-main.gen.yaml @@ -0,0 +1,128 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: eventing-awssqs
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 42 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-awssqs
+  name: continuous_eventing-awssqs_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: eventing-awssqs
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 38 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-awssqs
+  name: nightly_eventing-awssqs_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+- annotations:
+    testgrid-dashboards: eventing-awssqs
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 18 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-awssqs
+  name: release_eventing-awssqs_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/eventing-awssqs
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.0.gen.yaml
new file mode 100644
index 00000000000..e69fab65b69
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.0.gen.yaml
@@ -0,0 +1,94 @@
+# #######################################################################
+# ####                                                               ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.            ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-awssqs-continuous + cluster: build-knative + cron: 14 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-awssqs + name: continuous_eventing-awssqs_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-awssqs-release + cluster: build-knative + cron: 46 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-awssqs + name: release_eventing-awssqs_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-awssqs + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.1.gen.yaml new file mode 100644 index 00000000000..ddedfbf698f --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.1.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-awssqs-continuous + cluster: build-knative + cron: 5 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-awssqs + name: continuous_eventing-awssqs_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-awssqs-release + cluster: build-knative + cron: 27 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-awssqs + name: release_eventing-awssqs_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-awssqs + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.2.gen.yaml new file mode 100644 index 00000000000..763ccdf67f4 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-awssqs-release-1.2.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-awssqs-continuous + cluster: build-knative + cron: 20 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-awssqs + name: continuous_eventing-awssqs_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-awssqs-release + cluster: build-knative + cron: 16 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-awssqs + name: release_eventing-awssqs_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-awssqs + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-ceph-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-ceph-main.gen.yaml new file mode 100644 index 00000000000..a7e1d426d27 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-ceph-main.gen.yaml @@ -0,0 +1,182 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
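Reading across the generated periodics, only the minute field of the cron varies from repo to repo, which spreads the jobs out so they do not all land on the build-knative cluster at once. The cadence itself is uniform:

# Cadence of the generated periodics; <m> stands for the per-repo staggered minute.
#   continuous   (main):       <m> */9 * * *   # every 9 hours
#   auto-release (main):       <m> */9 * * *   # every 9 hours
#   nightly      (main):       <m> 9 * * *     # once a day
#   continuous   (release-*):  <m> 8 * * *     # once a day
#   dot-release  (release-*):  <m> 9 * * 2     # weekly, on Tuesdays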
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing-ceph + testgrid-tab-name: continuous + cluster: build-knative + cron: 4 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-ceph + name: continuous_eventing-ceph_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: eventing-ceph + testgrid-tab-name: nightly + cluster: build-knative + cron: 0 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-ceph + name: nightly_eventing-ceph_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: eventing-ceph + testgrid-tab-name: release + cluster: build-knative + cron: 24 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-ceph + name: release_eventing-ceph_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: 
testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup diff --git a/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.0.gen.yaml new file mode 100644 index 00000000000..9a1f5ef0b92 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.0.gen.yaml @@ -0,0 +1,112 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-ceph-continuous + cluster: build-knative + cron: 20 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-ceph + name: continuous_eventing-ceph_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-ceph-release + cluster: build-knative + cron: 36 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-ceph + name: release_eventing-ceph_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.1.gen.yaml new file mode 100644 index 00000000000..78c1487298f --- /dev/null +++ 
b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.1.gen.yaml @@ -0,0 +1,112 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-ceph-continuous + cluster: build-knative + cron: 39 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-ceph + name: continuous_eventing-ceph_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-ceph-release + cluster: build-knative + cron: 57 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-ceph + name: release_eventing-ceph_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.2.gen.yaml new file mode 100644 index 00000000000..580c163d373 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.2.gen.yaml @@ -0,0 +1,112 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-ceph-continuous
+  cluster: build-knative
+  cron: 6 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-ceph
+  name: continuous_eventing-ceph_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-ceph-release
+  cluster: build-knative
+  cron: 46 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-ceph
+  name: release_eventing-ceph_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-ceph
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.3.gen.yaml
new file mode 100644
index 00000000000..cea4a51f752
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-ceph-release-1.3.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-ceph-continuous
+  cluster: build-knative
+  cron: 37 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-ceph
+  name: continuous_eventing-ceph_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-ceph-release
+  cluster: build-knative
+  cron: 7 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-ceph
+  name: release_eventing-ceph_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-ceph
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-couchdb-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-couchdb-main.gen.yaml
new file mode 100644
index 00000000000..e37b00c081f
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-couchdb-main.gen.yaml
@@ -0,0 +1,182 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: eventing-couchdb
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 52 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: continuous_eventing-couchdb_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-couchdb
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 56 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: nightly_eventing-couchdb_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-couchdb
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 8 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: release_eventing-couchdb_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/eventing-couchdb
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-couchdb-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-couchdb-release-1.0.gen.yaml
new file mode 100644
index 00000000000..f5609cc7b44
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-couchdb-release-1.0.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-couchdb-continuous
+  cluster: build-knative
+  cron: 32 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: continuous_eventing-couchdb_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-couchdb-release
+  cluster: build-knative
+  cron: 16 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: release_eventing-couchdb_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-couchdb
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-couchdb-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-couchdb-release-1.1.gen.yaml
new file mode 100644
index 00000000000..780080dc886
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-couchdb-release-1.1.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-couchdb-continuous
+  cluster: build-knative
+  cron: 55 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: continuous_eventing-couchdb_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-couchdb-release
+  cluster: build-knative
+  cron: 13 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-couchdb
+  name: release_eventing-couchdb_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-couchdb
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-github-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-github-main.gen.yaml
new file mode 100644
index 00000000000..4a35a352239
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-github-main.gen.yaml
@@ -0,0 +1,182 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: eventing-github
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 43 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-github
+  name: continuous_eventing-github_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-github
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 53 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-github
+  name: nightly_eventing-github_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-github
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 33 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-github
+  name: release_eventing-github_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/eventing-github
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-github-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.0.gen.yaml
new file mode 100644
index 00000000000..6e610c1295e
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.0.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-github-continuous
+  cluster: build-knative
+  cron: 5 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-github
+  name: continuous_eventing-github_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-github-release
+  cluster: build-knative
+  cron: 3 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-github
+  name: release_eventing-github_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-github
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-github-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.1.gen.yaml
new file mode 100644
index 00000000000..20dbba6d682
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.1.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-github-continuous
+  cluster: build-knative
+  cron: 2 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-github
+  name: continuous_eventing-github_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-github-release
+  cluster: build-knative
+  cron: 50 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-github
+  name: release_eventing-github_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-github
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-github-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.2.gen.yaml
new file mode 100644
index 00000000000..a768de72eae
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.2.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-github-continuous
+  cluster: build-knative
+  cron: 59 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-github
+  name: continuous_eventing-github_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-github-release
+  cluster: build-knative
+  cron: 1 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-github
+  name: release_eventing-github_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-github
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-github-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.3.gen.yaml
new file mode 100644
index 00000000000..1ada0b316bb
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-github-release-1.3.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-github-continuous
+  cluster: build-knative
+  cron: 52 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-github
+  name: continuous_eventing-github_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-github-release
+  cluster: build-knative
+  cron: 56 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-github
+  name: release_eventing-github_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-github
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-gitlab-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-gitlab-main.gen.yaml
new file mode 100644
index 00000000000..025a07ca5e8
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-gitlab-main.gen.yaml
@@ -0,0 +1,194 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: eventing-gitlab
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 43 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: continuous_eventing-gitlab_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-gitlab
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 33 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: nightly_eventing-gitlab_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-gitlab
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 5 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: release_eventing-gitlab_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/eventing-gitlab
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.0.gen.yaml
new file mode 100644
index 00000000000..44826b097a5
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.0.gen.yaml
@@ -0,0 +1,120 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-gitlab-continuous
+  cluster: build-knative
+  cron: 25 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: continuous_eventing-gitlab_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-gitlab-release
+  cluster: build-knative
+  cron: 51 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: release_eventing-gitlab_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-gitlab
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.1.gen.yaml
new file mode 100644
index 00000000000..4fbf52d6a27
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.1.gen.yaml
@@ -0,0 +1,120 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-gitlab-continuous
+  cluster: build-knative
+  cron: 38 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: continuous_eventing-gitlab_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-gitlab-release
+  cluster: build-knative
+  cron: 14 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: release_eventing-gitlab_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-gitlab
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.2.gen.yaml
new file mode 100644
index 00000000000..445e4f53a4d
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.2.gen.yaml
@@ -0,0 +1,120 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-gitlab-continuous
+  cluster: build-knative
+  cron: 51 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: continuous_eventing-gitlab_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-gitlab-release
+  cluster: build-knative
+  cron: 37 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: release_eventing-gitlab_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-gitlab
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.3.gen.yaml
new file mode 100644
index 00000000000..f7da9a6952d
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-gitlab-release-1.3.gen.yaml
@@ -0,0 +1,120 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-gitlab-continuous
+  cluster: build-knative
+  cron: 24 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: continuous_eventing-gitlab_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-gitlab-release
+  cluster: build-knative
+  cron: 4 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-gitlab
+  name: release_eventing-gitlab_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-gitlab
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources:
+        limits:
+          memory: 16Gi
+        requests:
+          memory: 12Gi
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-main.gen.yaml
new file mode 100644
index 00000000000..a6863f3e6d1
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-main.gen.yaml
@@ -0,0 +1,855 @@
+# #######################################################################
+# ####                                                                ####
+# ####     THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####     USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                                ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: eventing-kafka-broker
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 52 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: continuous_eventing-kafka-broker_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-kafka-broker
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 28 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: nightly_eventing-kafka-broker_main_periodic
+  reporter_config:
+    slack:
+      channel: eventing-kafka
+      job_states_to_report:
+      - failure
+      report_template: |
+        "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-kafka-broker
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 0 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: release_eventing-kafka-broker_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/eventing-kafka-broker
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+presubmits:
+  knative-sandbox/eventing-kafka-broker:
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-tls_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-tls
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-sasl_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-sasl
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-distributed_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --distributed
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-mt-source_eventing-kafka-broker_main
+    optional: true
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --mt-source
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: upgrade-tests_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-upgrade-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: reconciler-tests_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-ssl_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-sasl-ssl_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-sasl-plain_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_PLAIN
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-ssl_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-sasl-ssl_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-sasl-plain_eventing-kafka-broker_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_PLAIN
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.0.gen.yaml
new file mode 100644
index 00000000000..8baf2f2392b
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.0.gen.yaml
@@ -0,0 +1,778 @@
+# #######################################################################
+# ####                                                               ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.          ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.   ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-kafka-broker-continuous
+  cluster: build-knative
+  cron: 36 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: continuous_eventing-kafka-broker_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-kafka-broker-release
+  cluster: build-knative
+  cron: 28 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: release_eventing-kafka-broker_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-kafka-broker
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/eventing-kafka-broker:
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-tls_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-tls
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-sasl_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-sasl
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-distributed_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --distributed
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-mt-source_eventing-kafka-broker_release-1.0
+    optional: true
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --mt-source
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: upgrade-tests_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-upgrade-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: reconciler-tests_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-ssl_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-sasl-ssl_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-sasl-plain_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_PLAIN
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-ssl_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-sasl-ssl_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-sasl-plain_eventing-kafka-broker_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_PLAIN
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.1.gen.yaml
new file mode 100644
index 00000000000..304a8f39c2e
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.1.gen.yaml
@@ -0,0 +1,778 @@
+# #######################################################################
+# ####                                                               ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.          ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.   ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-kafka-broker-continuous
+  cluster: build-knative
+  cron: 47 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: continuous_eventing-kafka-broker_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-kafka-broker-release
+  cluster: build-knative
+  cron: 45 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: release_eventing-kafka-broker_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-kafka-broker
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/eventing-kafka-broker:
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-tls_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-tls
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-sasl_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-sasl
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-distributed_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --distributed
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-mt-source_eventing-kafka-broker_release-1.1
+    optional: true
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --mt-source
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: upgrade-tests_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-upgrade-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: reconciler-tests_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-ssl_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-sasl-ssl_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-sasl-plain_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_PLAIN
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-ssl_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-sasl-ssl_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_SSL
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: channel-reconciler-tests-sasl-plain_eventing-kafka-broker_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO
+          value: SASL_PLAIN
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.2.gen.yaml
new file mode 100644
index 00000000000..c641f38e227
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.2.gen.yaml
@@ -0,0 +1,778 @@
+# #######################################################################
+# ####                                                               ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.          ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.   ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-kafka-broker-continuous
+  cluster: build-knative
+  cron: 50 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: continuous_eventing-kafka-broker_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-kafka-broker-release
+  cluster: build-knative
+  cron: 38 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-kafka-broker
+  name: release_eventing-kafka-broker_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-kafka-broker
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/eventing-kafka-broker:
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-tls_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-tls
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-consolidated-sasl_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --consolidated-sasl
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-channel-distributed_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --distributed
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-test-mt-source_eventing-kafka-broker_release-1.2
+    optional: true
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-tests.sh --mt-source
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: upgrade-tests_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/e2e-upgrade-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: reconciler-tests_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --run-test
+        - ./test/reconciler-tests.sh
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        - name: DOCKER_IN_DOCKER_ENABLED
+          value: "true"
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+        - mountPath: /docker-graph
+          name: docker-graph
+        - mountPath: /lib/modules
+          name: modules
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
+      - emptyDir: {}
+        name: docker-graph
+      - hostPath:
+          path: /lib/modules
+          type: Directory
+        name: modules
+      - hostPath:
+          path: /sys/fs/cgroup
+          type: Directory
+        name: cgroup
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: channel-integration-tests-ssl_eventing-kafka-broker_release-1.2
+    spec:
+      containers:
+ - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: channel-integration-tests-sasl-ssl_eventing-kafka-broker_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: channel-integration-tests-sasl-plain_eventing-kafka-broker_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: channel-reconciler-tests-ssl_eventing-kafka-broker_release-1.2 + spec: + containers: 
+ - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: channel-reconciler-tests-sasl-ssl_eventing-kafka-broker_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: channel-reconciler-tests-sasl-plain_eventing-kafka-broker_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.3.gen.yaml 
b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.3.gen.yaml new file mode 100644 index 00000000000..de066581cd8 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-broker-release-1.3.gen.yaml @@ -0,0 +1,778 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-kafka-broker-continuous + cluster: build-knative + cron: 21 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-kafka-broker + name: continuous_eventing-kafka-broker_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-kafka-broker-release + cluster: build-knative + cron: 15 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-kafka-broker + name: release_eventing-kafka-broker_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka-broker + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kafka-broker: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: 
unit-tests_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-tls_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-sasl_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: 
integration-test-channel-distributed_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-mt-source_eventing-kafka-broker_release-1.3 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: reconciler-tests_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + 
name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: channel-integration-tests-ssl_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: channel-integration-tests-sasl-ssl_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: channel-integration-tests-sasl-plain_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + 
name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: channel-reconciler-tests-ssl_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: channel-reconciler-tests-sasl-ssl_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: channel-reconciler-tests-sasl-plain_eventing-kafka-broker_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + 
type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-main.gen.yaml new file mode 100644 index 00000000000..04804627818 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-main.gen.yaml @@ -0,0 +1,470 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing-kafka + testgrid-tab-name: continuous + cluster: build-knative + cron: 10 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-kafka + name: continuous_eventing-kafka_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: eventing-kafka + testgrid-tab-name: nightly + cluster: build-knative + cron: 38 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-kafka + name: nightly_eventing-kafka_main_periodic + reporter_config: + slack: + channel: eventing-kafka + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: eventing-kafka + testgrid-tab-name: release + cluster: build-knative + cron: 26 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-kafka + name: release_eventing-kafka_main_periodic + spec: 
+ containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +presubmits: + knative-sandbox/eventing-kafka: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: 
build-knative + decorate: true + name: integration-test-channel-consolidated-tls_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-sasl_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-test-channel-distributed_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-test-mt-source_eventing-kafka_main + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true 
+ nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.0.gen.yaml new file mode 100644 index 00000000000..bf98b2346b4 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.0.gen.yaml @@ -0,0 +1,393 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-kafka-continuous + cluster: build-knative + cron: 30 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-kafka + name: continuous_eventing-kafka_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-kafka-release + cluster: build-knative + cron: 10 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-kafka + name: release_eventing-kafka_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kafka: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + 
privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-tls_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-sasl_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative 
+ decorate: true + name: integration-test-channel-distributed_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-test-mt-source_eventing-kafka_release-1.0 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.1.gen.yaml new file mode 100644 index 00000000000..8db9de9dc68 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.1.gen.yaml @@ -0,0 +1,393 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-kafka-continuous + cluster: build-knative + cron: 1 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-kafka + name: continuous_eventing-kafka_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-kafka-release + cluster: build-knative + cron: 55 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-kafka + name: release_eventing-kafka_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kafka: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + env: + - name: 
DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-tls_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-sasl_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-channel-distributed_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account 
+ secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-test-mt-source_eventing-kafka_release-1.1 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing-kafka_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.2.gen.yaml new file mode 100644 index 00000000000..bfe1797d049 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.2.gen.yaml @@ -0,0 +1,393 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-kafka-continuous + cluster: build-knative + cron: 52 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-kafka + name: continuous_eventing-kafka_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-kafka-release + cluster: build-knative + cron: 16 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-kafka + name: release_eventing-kafka_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kafka: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + env: + - name: 
DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-tls_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-sasl_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-channel-distributed_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account 
+ secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-test-mt-source_eventing-kafka_release-1.2 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing-kafka_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.3.gen.yaml new file mode 100644 index 00000000000..9cfe27771c3 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kafka-release-1.3.gen.yaml @@ -0,0 +1,393 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-kafka-continuous + cluster: build-knative + cron: 55 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-kafka + name: continuous_eventing-kafka_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-kafka-release + cluster: build-knative + cron: 41 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-kafka + name: release_eventing-kafka_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kafka: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + env: + - name: 
DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-tls_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-consolidated-sasl_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-channel-distributed_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account 
+ secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-test-mt-source_eventing-kafka_release-1.3 + optional: true + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing-kafka_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kogito-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kogito-main.gen.yaml new file mode 100644 index 00000000000..d28fbb7d808 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kogito-main.gen.yaml @@ -0,0 +1,207 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing-kogito + testgrid-tab-name: continuous + cluster: build-knative + cron: 27 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-kogito + name: continuous_eventing-kogito_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: eventing-kogito + testgrid-tab-name: nightly + cluster: build-knative + cron: 37 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-kogito + name: nightly_eventing-kogito_main_periodic + reporter_config: + slack: + channel: eventing-sources + job_states_to_report: + - failure + report_template: | + "The nightly release job for Kogito failed, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: eventing-kogito + testgrid-tab-name: release + cluster: build-knative + cron: 41 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-kogito + name: release_eventing-kogito_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kogito: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kogito_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kogito_main + 
spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_eventing-kogito_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.0.gen.yaml new file mode 100644 index 00000000000..f6b4e1bc357 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-kogito-continuous + cluster: build-knative + cron: 53 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-kogito + name: continuous_eventing-kogito_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-kogito-release + cluster: build-knative + cron: 47 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-kogito + name: release_eventing-kogito_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + 
secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kogito: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kogito_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kogito_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_eventing-kogito_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.1.gen.yaml new file mode 100644 index 00000000000..6e21d5d9963 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.1.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-kogito-continuous + cluster: build-knative + cron: 18 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-kogito + name: continuous_eventing-kogito_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-kogito-release + cluster: build-knative + cron: 38 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-kogito + name: release_eventing-kogito_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kogito: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kogito_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kogito_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_eventing-kogito_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + 
nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.2.gen.yaml new file mode 100644 index 00000000000..a48ba4ea0c7 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.2.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-kogito-continuous + cluster: build-knative + cron: 59 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-kogito + name: continuous_eventing-kogito_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-kogito-release + cluster: build-knative + cron: 13 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-kogito + name: release_eventing-kogito_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kogito: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_eventing-kogito_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kogito_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + 
nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_eventing-kogito_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.3.gen.yaml new file mode 100644 index 00000000000..f27d75b21f2 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-kogito-release-1.3.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-kogito-continuous + cluster: build-knative + cron: 36 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-kogito + name: continuous_eventing-kogito_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-kogito-release + cluster: build-knative + cron: 52 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-kogito + name: release_eventing-kogito_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/eventing-kogito: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: 
build-tests_eventing-kogito_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_eventing-kogito_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_eventing-kogito_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-natss-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-natss-main.gen.yaml new file mode 100644 index 00000000000..a5fb0062f8e --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-natss-main.gen.yaml @@ -0,0 +1,135 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing-natss + testgrid-tab-name: continuous + cluster: build-knative + cron: 45 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-natss + name: continuous_eventing-natss_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: eventing-natss + testgrid-tab-name: nightly + cluster: build-knative + cron: 59 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-natss + name: nightly_eventing-natss_main_periodic + reporter_config: + slack: + channel: eventing + job_states_to_report: + - failure + report_template: | + "The nightly release job for eventing-natss failed, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: eventing-natss + testgrid-tab-name: release + cluster: build-knative + cron: 55 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-natss + name: release_eventing-natss_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.0.gen.yaml new file mode 100644 index 00000000000..6424415ed77 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.0.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-natss-continuous + cluster: build-knative + cron: 23 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-natss + name: continuous_eventing-natss_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: eventing-natss-release + cluster: build-knative + cron: 33 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: eventing-natss + name: release_eventing-natss_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.1.gen.yaml new file mode 100644 index 00000000000..0e65c7c64ce --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.1.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-natss-continuous + cluster: build-knative + cron: 56 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-natss + name: continuous_eventing-natss_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: eventing-natss-release + cluster: build-knative + cron: 0 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: eventing-natss + name: release_eventing-natss_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.2.gen.yaml new file mode 100644 index 00000000000..efff56d4df4 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.2.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-natss-continuous + cluster: build-knative + cron: 9 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-natss + name: continuous_eventing-natss_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: eventing-natss-release + cluster: build-knative + cron: 47 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: eventing-natss + name: release_eventing-natss_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.3.gen.yaml new file mode 100644 index 00000000000..4e4a33d6ea5 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-natss-release-1.3.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-natss-continuous + cluster: build-knative + cron: 54 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-natss + name: continuous_eventing-natss_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: eventing-natss-release + cluster: build-knative + cron: 22 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: eventing-natss + name: release_eventing-natss_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-main.gen.yaml new file mode 100644 index 00000000000..9ac942ac4f4 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-main.gen.yaml @@ -0,0 +1,135 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing-rabbitmq + testgrid-tab-name: continuous + cluster: build-knative + cron: 50 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-rabbitmq + name: continuous_eventing-rabbitmq_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: eventing-rabbitmq + testgrid-tab-name: nightly + cluster: build-knative + cron: 30 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-rabbitmq + name: nightly_eventing-rabbitmq_main_periodic + reporter_config: + slack: + channel: eventing-rabbitmq + job_states_to_report: + - failure + report_template: | + "The nightly release job for eventing-rabbitmq failed, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: eventing-rabbitmq + testgrid-tab-name: release + cluster: build-knative + cron: 34 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: eventing-rabbitmq + name: release_eventing-rabbitmq_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing-rabbitmq + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account diff --git a/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.0.gen.yaml new file mode 100644 index 00000000000..7e5db7a6d6a --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.0.gen.yaml @@ -0,0 +1,94 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-rabbitmq-continuous
+  cluster: build-knative
+  cron: 18 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: continuous_eventing-rabbitmq_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-rabbitmq-release
+  cluster: build-knative
+  cron: 26 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: release_eventing-rabbitmq_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-rabbitmq
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.1.gen.yaml
new file mode 100644
index 00000000000..aa56d03939c
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.1.gen.yaml
@@ -0,0 +1,94 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-rabbitmq-continuous
+  cluster: build-knative
+  cron: 5 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: continuous_eventing-rabbitmq_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-rabbitmq-release
+  cluster: build-knative
+  cron: 11 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: release_eventing-rabbitmq_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-rabbitmq
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.2.gen.yaml
new file mode 100644
index 00000000000..3b4bce28e94
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.2.gen.yaml
@@ -0,0 +1,94 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-rabbitmq-continuous
+  cluster: build-knative
+  cron: 40 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: continuous_eventing-rabbitmq_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-rabbitmq-release
+  cluster: build-knative
+  cron: 32 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: release_eventing-rabbitmq_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-rabbitmq
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.3.gen.yaml
new file mode 100644
index 00000000000..eafb51bd87a
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-rabbitmq-release-1.3.gen.yaml
@@ -0,0 +1,94 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-rabbitmq-continuous
+  cluster: build-knative
+  cron: 59 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: continuous_eventing-rabbitmq_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-rabbitmq-release
+  cluster: build-knative
+  cron: 57 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-rabbitmq
+  name: release_eventing-rabbitmq_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-rabbitmq
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-redis-main.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-redis-main.gen.yaml
new file mode 100644
index 00000000000..0ca213dfc88
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-redis-main.gen.yaml
@@ -0,0 +1,182 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: eventing-redis
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 55 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-redis
+  name: continuous_eventing-redis_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-redis
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 1 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-redis
+  name: nightly_eventing-redis_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: eventing-redis
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 33 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: eventing-redis
+  name: release_eventing-redis_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/eventing-redis
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
diff --git a/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.0.gen.yaml
new file mode 100644
index 00000000000..5852f63a354
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.0.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-redis-continuous
+  cluster: build-knative
+  cron: 13 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-redis
+  name: continuous_eventing-redis_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: eventing-redis-release
+  cluster: build-knative
+  cron: 3 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: eventing-redis
+  name: release_eventing-redis_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-redis
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.1.gen.yaml
new file mode 100644
index 00000000000..194327a53dd
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.1.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-redis-continuous
+  cluster: build-knative
+  cron: 22 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-redis
+  name: continuous_eventing-redis_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: eventing-redis-release
+  cluster: build-knative
+  cron: 22 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: eventing-redis
+  name: release_eventing-redis_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-redis
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.2.gen.yaml
new file mode 100644
index 00000000000..757c929ea0c
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.2.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-redis-continuous
+  cluster: build-knative
+  cron: 27 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-redis
+  name: continuous_eventing-redis_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: eventing-redis-release
+  cluster: build-knative
+  cron: 45 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: eventing-redis
+  name: release_eventing-redis_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-redis
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.3.gen.yaml
new file mode 100644
index 00000000000..ad32a432a33
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/eventing-redis-release-1.3.gen.yaml
@@ -0,0 +1,112 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-redis-continuous
+  cluster: build-knative
+  cron: 28 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-redis
+  name: continuous_eventing-redis_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      - name: DOCKER_IN_DOCKER_ENABLED
+        value: "true"
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+      - mountPath: /docker-graph
+        name: docker-graph
+      - mountPath: /lib/modules
+        name: modules
+      - mountPath: /sys/fs/cgroup
+        name: cgroup
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+    - emptyDir: {}
+      name: docker-graph
+    - hostPath:
+        path: /lib/modules
+        type: Directory
+      name: modules
+    - hostPath:
+        path: /sys/fs/cgroup
+        type: Directory
+      name: cgroup
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: eventing-redis-release
+  cluster: build-knative
+  cron: 8 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: eventing-redis
+  name: release_eventing-redis_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/eventing-redis
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-admin-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-main.gen.yaml
new file mode 100644
index 00000000000..c56fc7fa8ba
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-main.gen.yaml
@@ -0,0 +1,200 @@
+# #######################################################################
+# #### ####
+# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ####
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-admin + testgrid-tab-name: continuous + cluster: build-knative + cron: 14 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-admin + name: continuous_kn-plugin-admin_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: kn-plugin-admin + testgrid-tab-name: nightly + cluster: build-knative + cron: 42 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-admin + name: nightly_kn-plugin-admin_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: kn-plugin-admin + testgrid-tab-name: release + cluster: build-knative + cron: 18 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-admin + name: release_kn-plugin-admin_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-admin: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-admin_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-admin_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + 
privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-admin_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.0.gen.yaml new file mode 100644 index 00000000000..c1a8792d657 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-admin-continuous + cluster: build-knative + cron: 58 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-admin + name: continuous_kn-plugin-admin_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-admin-release + cluster: build-knative + cron: 22 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-admin + name: release_kn-plugin-admin_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-admin: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: 
build-tests_kn-plugin-admin_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-admin_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-admin_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.1.gen.yaml new file mode 100644 index 00000000000..b079e8cd6b9 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.1.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-admin-continuous + cluster: build-knative + cron: 33 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-admin + name: continuous_kn-plugin-admin_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-admin-release + cluster: build-knative + cron: 55 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-admin + name: release_kn-plugin-admin_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-admin: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-admin_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-admin_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-admin_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + 
nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.2.gen.yaml new file mode 100644 index 00000000000..d8b2a4a5709 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.2.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-admin-continuous + cluster: build-knative + cron: 28 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-admin + name: continuous_kn-plugin-admin_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-admin-release + cluster: build-knative + cron: 12 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-admin + name: release_kn-plugin-admin_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-admin: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-admin_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-admin_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + 
nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-admin_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.3.gen.yaml new file mode 100644 index 00000000000..1ba877755c5 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-admin-release-1.3.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-admin-continuous + cluster: build-knative + cron: 31 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-admin + name: continuous_kn-plugin-admin_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-admin-release + cluster: build-knative + cron: 1 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-admin + name: release_kn-plugin-admin_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-admin: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: 
build-tests_kn-plugin-admin_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-admin_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-admin_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-diag-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-diag-main.gen.yaml new file mode 100644 index 00000000000..0bb6c52a68f --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-diag-main.gen.yaml @@ -0,0 +1,117 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-diag + testgrid-tab-name: continuous + cluster: build-knative + cron: 8 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-diag + name: continuous_kn-plugin-diag_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +presubmits: + knative-sandbox/kn-plugin-diag: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-diag_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-diag_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-diag_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-event-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-event-main.gen.yaml new file mode 100644 index 00000000000..9dd382274f9 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-event-main.gen.yaml @@ -0,0 +1,200 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-event + testgrid-tab-name: continuous + cluster: build-knative + cron: 5 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-event + name: continuous_kn-plugin-event_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: kn-plugin-event + testgrid-tab-name: nightly + cluster: build-knative + cron: 27 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-event + name: nightly_kn-plugin-event_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: kn-plugin-event + testgrid-tab-name: release + cluster: build-knative + cron: 39 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-event + name: release_kn-plugin-event_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-event: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-event_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-event_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + 
privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-event_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.0.gen.yaml new file mode 100644 index 00000000000..0699bd604a0 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-event-continuous + cluster: build-knative + cron: 39 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-event + name: continuous_kn-plugin-event_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-event-release + cluster: build-knative + cron: 49 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-event + name: release_kn-plugin-event_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-event: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: 
build-tests_kn-plugin-event_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-event_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-event_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.1.gen.yaml new file mode 100644 index 00000000000..749b3b3a54a --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.1.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-event-continuous + cluster: build-knative + cron: 36 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-event + name: continuous_kn-plugin-event_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-event-release + cluster: build-knative + cron: 44 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-event + name: release_kn-plugin-event_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-event: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-event_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-event_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-event_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + 
nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.2.gen.yaml new file mode 100644 index 00000000000..5251a01c300 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.2.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-event-continuous + cluster: build-knative + cron: 17 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-event + name: continuous_kn-plugin-event_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-event-release + cluster: build-knative + cron: 7 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-event + name: release_kn-plugin-event_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-event: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-event_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-event_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + 
nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-event_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.3.gen.yaml new file mode 100644 index 00000000000..2864b7ba91d --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-event-release-1.3.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-event-continuous + cluster: build-knative + cron: 10 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-event + name: continuous_kn-plugin-event_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-event-release + cluster: build-knative + cron: 18 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-event + name: release_kn-plugin-event_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-event: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: 
build-tests_kn-plugin-event_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-event_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-event_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-func-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-func-main.gen.yaml new file mode 100644 index 00000000000..7ffd7ec8f88 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-func-main.gen.yaml @@ -0,0 +1,127 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-func + testgrid-tab-name: nightly + cluster: build-knative + cron: 43 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-func + name: nightly_kn-plugin-func_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: kn-plugin-func + testgrid-tab-name: release + cluster: build-knative + cron: 19 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-func + name: release_kn-plugin-func_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-func + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-migration-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-migration-main.gen.yaml new file mode 100644 index 00000000000..7480636f3cb --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-migration-main.gen.yaml @@ -0,0 +1,117 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-migration + testgrid-tab-name: continuous + cluster: build-knative + cron: 53 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-migration + name: continuous_kn-plugin-migration_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +presubmits: + knative-sandbox/kn-plugin-migration: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-migration_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-migration_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-migration_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-operator-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-operator-main.gen.yaml new file mode 100644 index 00000000000..c9705880852 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-operator-main.gen.yaml @@ -0,0 +1,117 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-operator + testgrid-tab-name: continuous + cluster: build-knative + cron: 47 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-operator + name: continuous_kn-plugin-operator_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +presubmits: + knative-sandbox/kn-plugin-operator: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-operator_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-operator_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-operator_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-main.gen.yaml new file mode 100644 index 00000000000..8b820d6406d --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-main.gen.yaml @@ -0,0 +1,200 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-quickstart + testgrid-tab-name: continuous + cluster: build-knative + cron: 42 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-quickstart + name: continuous_kn-plugin-quickstart_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: kn-plugin-quickstart + testgrid-tab-name: nightly + cluster: build-knative + cron: 22 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-quickstart + name: nightly_kn-plugin-quickstart_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: kn-plugin-quickstart + testgrid-tab-name: release + cluster: build-knative + cron: 26 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-quickstart + name: release_kn-plugin-quickstart_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-quickstart: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-quickstart_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-quickstart_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-quickstart_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.0.gen.yaml new file mode 100644 index 00000000000..88aaf41ecd5 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-quickstart-continuous + cluster: build-knative + cron: 26 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-quickstart + name: continuous_kn-plugin-quickstart_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-quickstart-release + cluster: build-knative + cron: 14 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-quickstart + name: release_kn-plugin-quickstart_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: 
+  knative-sandbox/kn-plugin-quickstart:
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_kn-plugin-quickstart_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_kn-plugin-quickstart_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_kn-plugin-quickstart_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.1.gen.yaml
new file mode 100644
index 00000000000..04230c94aca
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.1.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                              ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-quickstart-continuous + cluster: build-knative + cron: 25 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-quickstart + name: continuous_kn-plugin-quickstart_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-quickstart-release + cluster: build-knative + cron: 55 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-quickstart + name: release_kn-plugin-quickstart_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-quickstart: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-quickstart_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-quickstart_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-quickstart_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: 
/etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.2.gen.yaml new file mode 100644 index 00000000000..bc7ea6dcd28 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.2.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-quickstart-continuous + cluster: build-knative + cron: 24 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-quickstart + name: continuous_kn-plugin-quickstart_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-quickstart-release + cluster: build-knative + cron: 52 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-quickstart + name: release_kn-plugin-quickstart_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-quickstart: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-quickstart_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-quickstart_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-quickstart_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.3.gen.yaml new file mode 100644 index 00000000000..5b0469e4607 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-quickstart-release-1.3.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-quickstart-continuous + cluster: build-knative + cron: 35 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-quickstart + name: continuous_kn-plugin-quickstart_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-quickstart-release + cluster: build-knative + cron: 13 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-quickstart + name: release_kn-plugin-quickstart_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account 
+presubmits:
+  knative-sandbox/kn-plugin-quickstart:
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_kn-plugin-quickstart_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_kn-plugin-quickstart_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_kn-plugin-quickstart_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-sample-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-sample-main.gen.yaml
new file mode 100644
index 00000000000..a4caf8ff412
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/kn-plugin-sample-main.gen.yaml
@@ -0,0 +1,117 @@
+# #######################################################################
+# ####                                                              ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-sample + testgrid-tab-name: continuous + cluster: build-knative + cron: 13 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-sample + name: continuous_kn-plugin-sample_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +presubmits: + knative-sandbox/kn-plugin-sample: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-sample_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-sample_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-sample_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-service-log-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-service-log-main.gen.yaml new file mode 100644 index 00000000000..2393d7219b4 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-service-log-main.gen.yaml @@ -0,0 +1,236 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-service-log + testgrid-tab-name: continuous + cluster: build-knative + cron: 13 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-service-log + name: continuous_kn-plugin-service-log_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: kn-plugin-service-log + testgrid-tab-name: nightly + cluster: build-knative + cron: 35 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-service-log + name: nightly_kn-plugin-service-log_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: kn-plugin-service-log + testgrid-tab-name: release + cluster: build-knative + cron: 15 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-service-log + name: release_kn-plugin-service-log_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-service-log + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + 
type: Directory + name: cgroup +presubmits: + knative-sandbox/kn-plugin-service-log: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-service-log_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-service-log_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-service-log_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-main.gen.yaml new file mode 100644 index 00000000000..dd6ae743414 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-main.gen.yaml @@ -0,0 +1,236 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-source-kafka + testgrid-tab-name: continuous + cluster: build-knative + cron: 29 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-source-kafka + name: continuous_kn-plugin-source-kafka_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: kn-plugin-source-kafka + testgrid-tab-name: nightly + cluster: build-knative + cron: 51 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-source-kafka + name: nightly_kn-plugin-source-kafka_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +- annotations: + testgrid-dashboards: kn-plugin-source-kafka + testgrid-tab-name: release + cluster: build-knative + cron: 43 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-source-kafka + name: release_kn-plugin-source-kafka_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-source-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: 
/sys/fs/cgroup + type: Directory + name: cgroup +presubmits: + knative-sandbox/kn-plugin-source-kafka: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kafka_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.0.gen.yaml new file mode 100644 index 00000000000..207af22c168 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-source-kafka-continuous + cluster: build-knative + cron: 51 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-source-kafka + name: continuous_kn-plugin-source-kafka_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-source-kafka-release + cluster: build-knative + cron: 41 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-source-kafka + name: release_kn-plugin-source-kafka_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-source-kafka: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kafka_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - 
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.1.gen.yaml
new file mode 100644
index 00000000000..b82ab4b9de6
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.1.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.          ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.   ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: kn-plugin-source-kafka-continuous
+  cluster: build-knative
+  cron: 32 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: kn-plugin-source-kafka
+  name: continuous_kn-plugin-source-kafka_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: kn-plugin-source-kafka-release
+  cluster: build-knative
+  cron: 20 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: kn-plugin-source-kafka
+  name: release_kn-plugin-source-kafka_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/kn-plugin-source-kafka
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/kn-plugin-source-kafka:
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_kn-plugin-source-kafka_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_kn-plugin-source-kafka_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_kn-plugin-source-kafka_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.2.gen.yaml
new file mode 100644
index 00000000000..fe1a0719c1c
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.2.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.          ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.   ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: kn-plugin-source-kafka-continuous
+  cluster: build-knative
+  cron: 17 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: kn-plugin-source-kafka
+  name: continuous_kn-plugin-source-kafka_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: kn-plugin-source-kafka-release
+  cluster: build-knative
+  cron: 7 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: kn-plugin-source-kafka
+  name: release_kn-plugin-source-kafka_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/kn-plugin-source-kafka
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/kn-plugin-source-kafka:
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_kn-plugin-source-kafka_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_kn-plugin-source-kafka_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_kn-plugin-source-kafka_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.3.gen.yaml
new file mode 100644
index 00000000000..be0c4422994
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kafka-release-1.3.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####   THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.          ####
+# ####   USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.   ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: kn-plugin-source-kafka-continuous
+  cluster: build-knative
+  cron: 42 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: kn-plugin-source-kafka
+  name: continuous_kn-plugin-source-kafka_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: kn-plugin-source-kafka-release
+  cluster: build-knative
+  cron: 18 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: kn-plugin-source-kafka
+  name: release_kn-plugin-source-kafka_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/kn-plugin-source-kafka
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/kn-plugin-source-kafka:
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_kn-plugin-source-kafka_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_kn-plugin-source-kafka_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_kn-plugin-source-kafka_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        -
mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-main.gen.yaml new file mode 100644 index 00000000000..1b081f82ae3 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-main.gen.yaml @@ -0,0 +1,200 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: kn-plugin-source-kamelet + testgrid-tab-name: continuous + cluster: build-knative + cron: 48 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: continuous_kn-plugin-source-kamelet_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: kn-plugin-source-kamelet + testgrid-tab-name: nightly + cluster: build-knative + cron: 32 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: nightly_kn-plugin-source-kamelet_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: kn-plugin-source-kamelet + testgrid-tab-name: release + cluster: build-knative + cron: 44 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: release_kn-plugin-source-kamelet_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: 
release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-source-kamelet: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kamelet_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kamelet_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kamelet_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.0.gen.yaml new file mode 100644 index 00000000000..d9f5252506a --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-source-kamelet-continuous + cluster: build-knative + cron: 20 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: continuous_kn-plugin-source-kamelet_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: kn-plugin-source-kamelet-release + cluster: build-knative + cron: 4 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: release_kn-plugin-source-kamelet_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-source-kamelet: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kamelet_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kamelet_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kamelet_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + 
volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.1.gen.yaml new file mode 100644 index 00000000000..384417fc5e2 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.1.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-source-kamelet-continuous + cluster: build-knative + cron: 15 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: continuous_kn-plugin-source-kamelet_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: kn-plugin-source-kamelet-release + cluster: build-knative + cron: 5 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: release_kn-plugin-source-kamelet_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-source-kamelet: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kamelet_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kamelet_release-1.1 + spec: + containers: + - 
command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kamelet_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.2.gen.yaml new file mode 100644 index 00000000000..589aee54842 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.2.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-source-kamelet-continuous + cluster: build-knative + cron: 6 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: continuous_kn-plugin-source-kamelet_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: kn-plugin-source-kamelet-release + cluster: build-knative + cron: 6 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: release_kn-plugin-source-kamelet_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + 
volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-source-kamelet: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kamelet_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kamelet_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kamelet_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.3.gen.yaml new file mode 100644 index 00000000000..9511f16882b --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kn-plugin-source-kamelet-release-1.3.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-source-kamelet-continuous + cluster: build-knative + cron: 49 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: continuous_kn-plugin-source-kamelet_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: kn-plugin-source-kamelet-release + cluster: build-knative + cron: 43 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: kn-plugin-source-kamelet + name: release_kn-plugin-source-kamelet_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/kn-plugin-source-kamelet: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_kn-plugin-source-kamelet_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_kn-plugin-source-kamelet_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_kn-plugin-source-kamelet_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true 
+ volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/kperf-main.gen.yaml b/prow/jobs/generated/knative-sandbox/kperf-main.gen.yaml new file mode 100644 index 00000000000..b0603c1796f --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/kperf-main.gen.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +presubmits: + knative-sandbox/kperf: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_kperf_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_kperf_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_kperf_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-certmanager-main.gen.yaml b/prow/jobs/generated/knative-sandbox/net-certmanager-main.gen.yaml new file mode 100644 index 00000000000..481764a11d7 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-certmanager-main.gen.yaml @@ -0,0 +1,207 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: net-certmanager + testgrid-tab-name: continuous + cluster: build-knative + cron: 24 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-certmanager + name: continuous_net-certmanager_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: net-certmanager + testgrid-tab-name: nightly + cluster: build-knative + cron: 0 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-certmanager + name: nightly_net-certmanager_main_periodic + reporter_config: + slack: + channel: net-certmanager + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: net-certmanager + testgrid-tab-name: release + cluster: build-knative + cron: 32 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-certmanager + name: release_net-certmanager_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-certmanager: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_net-certmanager_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_net-certmanager_main + spec: + 
containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_net-certmanager_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.0.gen.yaml new file mode 100644 index 00000000000..ccc75b6d9cc --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: net-certmanager-continuous + cluster: build-knative + cron: 56 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: net-certmanager + name: continuous_net-certmanager_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: net-certmanager-release + cluster: build-knative + cron: 4 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: net-certmanager + name: release_net-certmanager_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + 
secretName: release-account +presubmits: + knative-sandbox/net-certmanager: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_net-certmanager_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_net-certmanager_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_net-certmanager_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.1.gen.yaml new file mode 100644 index 00000000000..291debc5e72 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.1.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: net-certmanager-continuous + cluster: build-knative + cron: 55 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: net-certmanager + name: continuous_net-certmanager_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: net-certmanager-release + cluster: build-knative + cron: 41 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: net-certmanager + name: release_net-certmanager_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-certmanager: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_net-certmanager_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_net-certmanager_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_net-certmanager_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + 
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.2.gen.yaml
new file mode 100644
index 00000000000..67e11b544a0
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.2.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-certmanager-continuous
+  cluster: build-knative
+  cron: 50 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-certmanager
+  name: continuous_net-certmanager_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-certmanager-release
+  cluster: build-knative
+  cron: 18 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-certmanager
+  name: release_net-certmanager_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-certmanager
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-certmanager:
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-certmanager_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-certmanager_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-certmanager_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.3.gen.yaml
new file mode 100644
index 00000000000..7d6a44a8421
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-certmanager-release-1.3.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-certmanager-continuous
+  cluster: build-knative
+  cron: 41 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-certmanager
+  name: continuous_net-certmanager_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-certmanager-release
+  cluster: build-knative
+  cron: 59 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-certmanager
+  name: release_net-certmanager_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-certmanager
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-certmanager:
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-certmanager_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-certmanager_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-certmanager_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-contour-main.gen.yaml b/prow/jobs/generated/knative-sandbox/net-contour-main.gen.yaml
new file mode 100644
index 00000000000..8ce8d6a8bfe
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-contour-main.gen.yaml
@@ -0,0 +1,207 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: net-contour
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 55 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-contour
+  name: continuous_net-contour_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: net-contour
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 17 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-contour
+  name: nightly_net-contour_main_periodic
+  reporter_config:
+    slack:
+      channel: net-contour
+      job_states_to_report:
+      - failure
+      report_template: |
+        "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+- annotations:
+    testgrid-dashboards: net-contour
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 13 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-contour
+  name: release_net-contour_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/net-contour
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-contour:
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-contour_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-contour_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-contour_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-contour-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/net-contour-release-1.0.gen.yaml
new file mode 100644
index 00000000000..952b1e46ebb
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-contour-release-1.0.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: net-contour-continuous
+  cluster: build-knative
+  cron: 1 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: net-contour
+  name: continuous_net-contour_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: net-contour-release
+  cluster: build-knative
+  cron: 7 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: net-contour
+  name: release_net-contour_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-contour
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-contour:
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-contour_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-contour_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-contour_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-contour-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/net-contour-release-1.1.gen.yaml
new file mode 100644
index 00000000000..964e2384b21
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-contour-release-1.1.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: net-contour-continuous
+  cluster: build-knative
+  cron: 18 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: net-contour
+  name: continuous_net-contour_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: net-contour-release
+  cluster: build-knative
+  cron: 30 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: net-contour
+  name: release_net-contour_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-contour
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-contour:
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-contour_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-contour_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-contour_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-contour-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/net-contour-release-1.2.gen.yaml
new file mode 100644
index 00000000000..be0dfb3e33b
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-contour-release-1.2.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-contour-continuous
+  cluster: build-knative
+  cron: 27 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-contour
+  name: continuous_net-contour_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-contour-release
+  cluster: build-knative
+  cron: 29 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-contour
+  name: release_net-contour_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-contour
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-contour:
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-contour_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-contour_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-contour_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-contour-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/net-contour-release-1.3.gen.yaml
new file mode 100644
index 00000000000..41bef76b274
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-contour-release-1.3.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-contour-continuous
+  cluster: build-knative
+  cron: 48 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-contour
+  name: continuous_net-contour_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-contour-release
+  cluster: build-knative
+  cron: 28 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-contour
+  name: release_net-contour_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-contour
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-contour:
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-contour_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-contour_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-contour_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-gateway-api-main.gen.yaml b/prow/jobs/generated/knative-sandbox/net-gateway-api-main.gen.yaml
new file mode 100644
index 00000000000..11ef3b4f9ca
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-gateway-api-main.gen.yaml
@@ -0,0 +1,207 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: net-gateway-api
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 38 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: continuous_net-gateway-api_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: net-gateway-api
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 6 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: nightly_net-gateway-api_main_periodic
+  reporter_config:
+    slack:
+      channel: net-gateway-api
+      job_states_to_report:
+      - failure
+      report_template: |
+        "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+- annotations:
+    testgrid-dashboards: net-gateway-api
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 50 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: release_net-gateway-api_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/net-gateway-api
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-gateway-api:
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-gateway-api_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-gateway-api_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-gateway-api_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.1.gen.yaml
new file mode 100644
index 00000000000..ff5880660f0
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.1.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: net-gateway-api-continuous
+  cluster: build-knative
+  cron: 49 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: continuous_net-gateway-api_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: net-gateway-api-release
+  cluster: build-knative
+  cron: 11 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: release_net-gateway-api_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-gateway-api
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-gateway-api:
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-gateway-api_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-gateway-api_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-gateway-api_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.2.gen.yaml
new file mode 100644
index 00000000000..76bd0ecfa4a
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.2.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-gateway-api-continuous
+  cluster: build-knative
+  cron: 4 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: continuous_net-gateway-api_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-gateway-api-release
+  cluster: build-knative
+  cron: 8 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: release_net-gateway-api_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-gateway-api
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-gateway-api:
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-gateway-api_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-gateway-api_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-gateway-api_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.3.gen.yaml
new file mode 100644
index 00000000000..4829ec99fe7
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-gateway-api-release-1.3.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-gateway-api-continuous
+  cluster: build-knative
+  cron: 43 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: continuous_net-gateway-api_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-gateway-api-release
+  cluster: build-knative
+  cron: 45 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-gateway-api
+  name: release_net-gateway-api_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-gateway-api
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-gateway-api:
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-gateway-api_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-gateway-api_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-gateway-api_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-http01-main.gen.yaml b/prow/jobs/generated/knative-sandbox/net-http01-main.gen.yaml
new file mode 100644
index 00000000000..544c4b977be
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-http01-main.gen.yaml
@@ -0,0 +1,207 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: net-http01
+    testgrid-tab-name: continuous
+  cluster: build-knative
+  cron: 28 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-http01
+  name: continuous_net-http01_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: net-http01
+    testgrid-tab-name: nightly
+  cluster: build-knative
+  cron: 20 9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-http01
+  name: nightly_net-http01_main_periodic
+  reporter_config:
+    slack:
+      channel: net-http01
+      job_states_to_report:
+      - failure
+      report_template: |
+        "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --publish
+      - --tag-release
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/nightly-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/nightly-account
+        name: nightly-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: nightly-account
+      secret:
+        secretName: nightly-account
+- annotations:
+    testgrid-dashboards: net-http01
+    testgrid-tab-name: release
+  cluster: build-knative
+  cron: 4 */9 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: main
+    org: knative-sandbox
+    repo: net-http01
+  name: release_net-http01_main_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --auto-release
+      - --release-gcs
+      - knative-releases/net-http01
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-http01:
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-http01_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-http01_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^main$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-http01_main
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-http01-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/net-http01-release-1.0.gen.yaml
new file mode 100644
index 00000000000..d850b80ef62
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-http01-release-1.0.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: net-http01-continuous
+  cluster: build-knative
+  cron: 28 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: net-http01
+  name: continuous_net-http01_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.0
+    testgrid-tab-name: net-http01-release
+  cluster: build-knative
+  cron: 16 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.0
+    org: knative-sandbox
+    repo: net-http01
+  name: release_net-http01_release-1.0_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-http01
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.0
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-http01:
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-http01_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-http01_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.0$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-http01_release-1.0
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-http01-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/net-http01-release-1.1.gen.yaml
new file mode 100644
index 00000000000..761eab06cf1
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-http01-release-1.1.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: net-http01-continuous
+  cluster: build-knative
+  cron: 35 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: net-http01
+  name: continuous_net-http01_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.1
+    testgrid-tab-name: net-http01-release
+  cluster: build-knative
+  cron: 37 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.1
+    org: knative-sandbox
+    repo: net-http01
+  name: release_net-http01_release-1.1_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-http01
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.1
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-http01:
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-http01_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-http01_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.1$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-http01_release-1.1
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-http01-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/net-http01-release-1.2.gen.yaml
new file mode 100644
index 00000000000..24563a69930
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-http01-release-1.2.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-http01-continuous
+  cluster: build-knative
+  cron: 30 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-http01
+  name: continuous_net-http01_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.2
+    testgrid-tab-name: net-http01-release
+  cluster: build-knative
+  cron: 34 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.2
+    org: knative-sandbox
+    repo: net-http01
+  name: release_net-http01_release-1.2_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-http01
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.2
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-http01:
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-http01_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: unit-tests_net-http01_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --unit-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+      nodeSelector:
+        type: testing
+  - always_run: true
+    branches:
+    - ^release-1.2$
+    cluster: build-knative
+    decorate: true
+    name: integration-tests_net-http01_release-1.2
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --integration-tests
+        env:
+        - name: E2E_CLUSTER_REGION
+          value: us-central1
+        - name: GOOGLE_APPLICATION_CREDENTIALS
+          value: /etc/test-account/service-account.json
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources: {}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /etc/test-account
+          name: test-account
+          readOnly: true
+      nodeSelector:
+        type: testing
+      volumes:
+      - name: test-account
+        secret:
+          secretName: test-account
diff --git a/prow/jobs/generated/knative-sandbox/net-http01-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/net-http01-release-1.3.gen.yaml
new file mode 100644
index 00000000000..50d59c11778
--- /dev/null
+++ b/prow/jobs/generated/knative-sandbox/net-http01-release-1.3.gen.yaml
@@ -0,0 +1,166 @@
+# #######################################################################
+# ####                                                               ####
+# ####    THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.         ####
+# ####    USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE.  ####
+# ####                                                               ####
+# #######################################################################
+
+periodics:
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-http01-continuous
+  cluster: build-knative
+  cron: 1 8 * * *
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-http01
+  name: continuous_net-http01_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./test/presubmit-tests.sh
+      - --all-tests
+      env:
+      - name: E2E_CLUSTER_REGION
+        value: us-central1
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/test-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/test-account
+        name: test-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: test-account
+      secret:
+        secretName: test-account
+- annotations:
+    testgrid-dashboards: knative-sandbox-release-1.3
+    testgrid-tab-name: net-http01-release
+  cluster: build-knative
+  cron: 43 9 * * 2
+  decorate: true
+  extra_refs:
+  - base_ref: release-1.3
+    org: knative-sandbox
+    repo: net-http01
+  name: release_net-http01_release-1.3_periodic
+  spec:
+    containers:
+    - command:
+      - runner.sh
+      - ./hack/release.sh
+      - --dot-release
+      - --release-gcs
+      - knative-releases/net-http01
+      - --release-gcr
+      - gcr.io/knative-releases
+      - --github-token
+      - /etc/hub-token/token
+      - --branch
+      - release-1.3
+      env:
+      - name: GOOGLE_APPLICATION_CREDENTIALS
+        value: /etc/release-account/service-account.json
+      image: gcr.io/knative-tests/test-infra/prow-tests:stable
+      name: ""
+      resources: {}
+      securityContext:
+        privileged: true
+      volumeMounts:
+      - mountPath: /etc/hub-token
+        name: hub-token
+        readOnly: true
+      - mountPath: /etc/release-account
+        name: release-account
+        readOnly: true
+    nodeSelector:
+      type: testing
+    volumes:
+    - name: hub-token
+      secret:
+        secretName: hub-token
+    - name: release-account
+      secret:
+        secretName: release-account
+presubmits:
+  knative-sandbox/net-http01:
+  - always_run: true
+    branches:
+    - ^release-1.3$
+    cluster: build-knative
+    decorate: true
+    name: build-tests_net-http01_release-1.3
+    spec:
+      containers:
+      - command:
+        - runner.sh
+        - ./test/presubmit-tests.sh
+        - --build-tests
+        image: gcr.io/knative-tests/test-infra/prow-tests:stable
+        name: ""
+        resources:
{} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_net-http01_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_net-http01_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-istio-main.gen.yaml b/prow/jobs/generated/knative-sandbox/net-istio-main.gen.yaml new file mode 100644 index 00000000000..bc9fa32279f --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-istio-main.gen.yaml @@ -0,0 +1,273 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: net-istio + testgrid-tab-name: continuous + cluster: build-knative + cron: 41 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-istio + name: continuous_net-istio_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: net-istio + testgrid-tab-name: nightly + cluster: build-knative + cron: 35 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-istio + name: nightly_net-istio_main_periodic + reporter_config: + slack: + channel: net-istio + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: 
nightly-account +- annotations: + testgrid-dashboards: net-istio + testgrid-tab-name: release + cluster: build-knative + cron: 3 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-istio + name: release_net-istio_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-istio: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_net-istio_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_net-istio_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_net-istio_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: latest_net-istio_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: latest-mesh_net-istio_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: 
us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-istio-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/net-istio-release-1.0.gen.yaml new file mode 100644 index 00000000000..8d391689a28 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-istio-release-1.0.gen.yaml @@ -0,0 +1,232 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: net-istio-continuous + cluster: build-knative + cron: 47 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: net-istio + name: continuous_net-istio_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: net-istio-release + cluster: build-knative + cron: 37 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: net-istio + name: release_net-istio_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-istio: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_net-istio_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: 
unit-tests_net-istio_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_net-istio_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: latest_net-istio_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: latest-mesh_net-istio_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-istio-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/net-istio-release-1.1.gen.yaml new file mode 100644 index 00000000000..3f396aa17c8 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-istio-release-1.1.gen.yaml @@ -0,0 +1,232 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: net-istio-continuous + cluster: build-knative + cron: 44 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: net-istio + name: continuous_net-istio_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: net-istio-release + cluster: build-knative + cron: 32 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: net-istio + name: release_net-istio_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-istio: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_net-istio_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_net-istio_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_net-istio_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + 
secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: latest_net-istio_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: latest-mesh_net-istio_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-istio-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/net-istio-release-1.2.gen.yaml new file mode 100644 index 00000000000..8d30315f2ca --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-istio-release-1.2.gen.yaml @@ -0,0 +1,232 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: net-istio-continuous + cluster: build-knative + cron: 41 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: net-istio + name: continuous_net-istio_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: net-istio-release + cluster: build-knative + cron: 55 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: net-istio + name: release_net-istio_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-istio: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_net-istio_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_net-istio_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_net-istio_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + 
secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: latest_net-istio_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: latest-mesh_net-istio_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-istio-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/net-istio-release-1.3.gen.yaml new file mode 100644 index 00000000000..c73f99978ad --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-istio-release-1.3.gen.yaml @@ -0,0 +1,232 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: net-istio-continuous + cluster: build-knative + cron: 18 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: net-istio + name: continuous_net-istio_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: net-istio-release + cluster: build-knative + cron: 6 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: net-istio + name: release_net-istio_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-istio: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_net-istio_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_net-istio_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_net-istio_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + 
secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: latest_net-istio_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: latest-mesh_net-istio_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-kourier-main.gen.yaml b/prow/jobs/generated/knative-sandbox/net-kourier-main.gen.yaml new file mode 100644 index 00000000000..a0186b60fc7 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-kourier-main.gen.yaml @@ -0,0 +1,207 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: net-kourier + testgrid-tab-name: continuous + cluster: build-knative + cron: 54 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-kourier + name: continuous_net-kourier_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: net-kourier + testgrid-tab-name: nightly + cluster: build-knative + cron: 30 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-kourier + name: nightly_net-kourier_main_periodic + reporter_config: + slack: + channel: net-kourier + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: net-kourier + testgrid-tab-name: release + cluster: build-knative + cron: 34 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: net-kourier + name: release_net-kourier_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-kourier: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_net-kourier_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_net-kourier_main + spec: + containers: + - command: + - runner.sh + - 
./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_net-kourier_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-kourier-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.0.gen.yaml new file mode 100644 index 00000000000..64f8df4223d --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.0.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: net-kourier-continuous + cluster: build-knative + cron: 2 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: net-kourier + name: continuous_net-kourier_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: net-kourier-release + cluster: build-knative + cron: 42 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: net-kourier + name: release_net-kourier_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-kourier: + - always_run: 
true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_net-kourier_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_net-kourier_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_net-kourier_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-kourier-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.1.gen.yaml new file mode 100644 index 00000000000..8544da08e86 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.1.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: net-kourier-continuous + cluster: build-knative + cron: 17 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: net-kourier + name: continuous_net-kourier_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: net-kourier-release + cluster: build-knative + cron: 7 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: net-kourier + name: release_net-kourier_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-kourier: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_net-kourier_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_net-kourier_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_net-kourier_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: 
test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-kourier-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.2.gen.yaml new file mode 100644 index 00000000000..504c080a548 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.2.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: net-kourier-continuous + cluster: build-knative + cron: 48 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: net-kourier + name: continuous_net-kourier_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: net-kourier-release + cluster: build-knative + cron: 20 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: net-kourier + name: release_net-kourier_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-kourier: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_net-kourier_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_net-kourier_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + 
decorate: true + name: integration-tests_net-kourier_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/net-kourier-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.3.gen.yaml new file mode 100644 index 00000000000..0598974bc74 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/net-kourier-release-1.3.gen.yaml @@ -0,0 +1,166 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: net-kourier-continuous + cluster: build-knative + cron: 15 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: net-kourier + name: continuous_net-kourier_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: net-kourier-release + cluster: build-knative + cron: 53 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: net-kourier + name: release_net-kourier_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/net-kourier: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_net-kourier_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_net-kourier_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_net-kourier_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/reconciler-test-main.gen.yaml b/prow/jobs/generated/knative-sandbox/reconciler-test-main.gen.yaml new file mode 100644 index 00000000000..ab3f103d3b9 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/reconciler-test-main.gen.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +presubmits: + knative-sandbox/reconciler-test: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_reconciler-test_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_reconciler-test_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_reconciler-test_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative-sandbox/sample-controller-main.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-controller-main.gen.yaml new file mode 100644 index 
00000000000..7f769a7131e --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-controller-main.gen.yaml @@ -0,0 +1,131 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: sample-controller + testgrid-tab-name: nightly + cluster: build-knative + cron: 58 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: sample-controller + name: nightly_sample-controller_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: sample-controller + testgrid-tab-name: release + cluster: build-knative + cron: 14 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: sample-controller + name: release_sample-controller_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-controller: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_sample-controller_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_sample-controller_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-controller-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.0.gen.yaml new file mode 100644 index 00000000000..2a6e3218c3d --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.0.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# 
#### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: sample-controller-release + cluster: build-knative + cron: 10 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: sample-controller + name: release_sample-controller_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-controller: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_sample-controller_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_sample-controller_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-controller-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.1.gen.yaml new file mode 100644 index 00000000000..2014ee4744f --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.1.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: sample-controller-release + cluster: build-knative + cron: 47 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: sample-controller + name: release_sample-controller_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-controller: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_sample-controller_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_sample-controller_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-controller-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.2.gen.yaml new file mode 100644 index 00000000000..1525a98dc30 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.2.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: sample-controller-release + cluster: build-knative + cron: 0 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: sample-controller + name: release_sample-controller_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-controller: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_sample-controller_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_sample-controller_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-controller-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.3.gen.yaml new file mode 100644 index 00000000000..c0bc2d177fc --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-controller-release-1.3.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: sample-controller-release + cluster: build-knative + cron: 21 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: sample-controller + name: release_sample-controller_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-controller: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_sample-controller_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_sample-controller_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-source-main.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-source-main.gen.yaml new file mode 100644 index 00000000000..c26fe0ba345 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-source-main.gen.yaml @@ -0,0 +1,131 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: sample-source + testgrid-tab-name: nightly + cluster: build-knative + cron: 49 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: sample-source + name: nightly_sample-source_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: sample-source + testgrid-tab-name: release + cluster: build-knative + cron: 29 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative-sandbox + repo: sample-source + name: release_sample-source_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-source: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_sample-source_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_sample-source_main + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-source-release-1.0.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-source-release-1.0.gen.yaml new file mode 100644 index 00000000000..ef60573ed3e --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-source-release-1.0.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.0 + testgrid-tab-name: sample-source-release + cluster: build-knative + cron: 59 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative-sandbox + repo: sample-source + name: release_sample-source_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-source: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_sample-source_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_sample-source_release-1.0 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-source-release-1.1.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-source-release-1.1.gen.yaml new file mode 100644 index 00000000000..d3bfa9ff1b5 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-source-release-1.1.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.1 + testgrid-tab-name: sample-source-release + cluster: build-knative + cron: 34 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative-sandbox + repo: sample-source + name: release_sample-source_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-source: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_sample-source_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_sample-source_release-1.1 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-source-release-1.2.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-source-release-1.2.gen.yaml new file mode 100644 index 00000000000..3cc385c1cce --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-source-release-1.2.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.2 + testgrid-tab-name: sample-source-release + cluster: build-knative + cron: 49 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative-sandbox + repo: sample-source + name: release_sample-source_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-source: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_sample-source_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_sample-source_release-1.2 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative-sandbox/sample-source-release-1.3.gen.yaml b/prow/jobs/generated/knative-sandbox/sample-source-release-1.3.gen.yaml new file mode 100644 index 00000000000..15e00be2d16 --- /dev/null +++ b/prow/jobs/generated/knative-sandbox/sample-source-release-1.3.gen.yaml @@ -0,0 +1,97 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-sandbox-release-1.3 + testgrid-tab-name: sample-source-release + cluster: build-knative + cron: 12 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative-sandbox + repo: sample-source + name: release_sample-source_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative-sandbox/sample-source: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_sample-source_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_sample-source_release-1.3 + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative/caching-main.gen.yaml b/prow/jobs/generated/knative/caching-main.gen.yaml new file mode 100644 index 00000000000..a8c4697209b --- /dev/null +++ b/prow/jobs/generated/knative/caching-main.gen.yaml @@ -0,0 +1,82 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +presubmits: + knative/caching: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_caching_main + path_alias: knative.dev/caching + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_caching_main + path_alias: knative.dev/caching + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_caching_main + path_alias: knative.dev/caching + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/client-main.gen.yaml b/prow/jobs/generated/knative/client-main.gen.yaml new file mode 100644 index 00000000000..b4332bdd347 --- /dev/null +++ b/prow/jobs/generated/knative/client-main.gen.yaml @@ -0,0 +1,347 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: client + testgrid-tab-name: continuous + cluster: build-knative + cron: 4 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/client + repo: client + name: continuous_client_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: client + testgrid-tab-name: tekton + cluster: build-knative + cron: 50 10 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/client + repo: client + name: tekton_client_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/tekton-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: client + testgrid-tab-name: s390x-e2e-tests + cluster: build-knative + cron: 23 10 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/client + repo: client + name: s390x-e2e-tests_client_main_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: client + testgrid-tab-name: nightly + cluster: build-knative + cron: 12 9 * * * + decorate: true + 
extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/client + repo: client + name: nightly_client_main_periodic + reporter_config: + slack: + channel: client + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: client + testgrid-tab-name: release + cluster: build-knative + cron: 4 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/client + repo: client + name: release_client_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/client + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/client: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_client_main + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_client_main + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_client_main + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: 
integration-tests-latest-release_client_main + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-integration-tests-latest-release.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/client-pkg-main.gen.yaml b/prow/jobs/generated/knative/client-pkg-main.gen.yaml new file mode 100644 index 00000000000..3e65d436f23 --- /dev/null +++ b/prow/jobs/generated/knative/client-pkg-main.gen.yaml @@ -0,0 +1,88 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: client-pkg + testgrid-tab-name: continuous + cluster: build-knative + cron: 31 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/client-pkg + repo: client-pkg + name: continuous_client-pkg_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +presubmits: + knative/client-pkg: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_client-pkg_main + path_alias: knative.dev/client-pkg + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_client-pkg_main + path_alias: knative.dev/client-pkg + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative/client-release-1.0.gen.yaml b/prow/jobs/generated/knative/client-release-1.0.gen.yaml new file mode 100644 index 00000000000..0095d687b0f --- /dev/null +++ b/prow/jobs/generated/knative/client-release-1.0.gen.yaml @@ -0,0 +1,268 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: client-continuous + cluster: build-knative + cron: 16 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/client + repo: client + name: continuous_client_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: client-s390x-e2e-tests + cluster: build-knative + cron: 51 10 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/client + repo: client + name: s390x-e2e-tests_client_release-1.0_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: client-release + cluster: build-knative + cron: 8 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/client + repo: client + name: release_client_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/client + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: 
/etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/client: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_client_release-1.0 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_client_release-1.0 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_client_release-1.0 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests-latest-release_client_release-1.0 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-integration-tests-latest-release.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/client-release-1.1.gen.yaml b/prow/jobs/generated/knative/client-release-1.1.gen.yaml new file mode 100644 index 00000000000..b9459858e8e --- /dev/null +++ b/prow/jobs/generated/knative/client-release-1.1.gen.yaml @@ -0,0 +1,268 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: client-continuous + cluster: build-knative + cron: 55 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/client + repo: client + name: continuous_client_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: client-s390x-e2e-tests + cluster: build-knative + cron: 46 10 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/client + repo: client + name: s390x-e2e-tests_client_release-1.1_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: client-release + cluster: build-knative + cron: 53 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/client + repo: client + name: release_client_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/client + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: 
/etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/client: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_client_release-1.1 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_client_release-1.1 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_client_release-1.1 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests-latest-release_client_release-1.1 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-integration-tests-latest-release.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/client-release-1.2.gen.yaml b/prow/jobs/generated/knative/client-release-1.2.gen.yaml new file mode 100644 index 00000000000..e24832220f4 --- /dev/null +++ b/prow/jobs/generated/knative/client-release-1.2.gen.yaml @@ -0,0 +1,268 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: client-continuous + cluster: build-knative + cron: 38 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/client + repo: client + name: continuous_client_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: client-s390x-e2e-tests + cluster: build-knative + cron: 57 10 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/client + repo: client + name: s390x-e2e-tests_client_release-1.2_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: client-release + cluster: build-knative + cron: 22 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/client + repo: client + name: release_client_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/client + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: 
/etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/client: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_client_release-1.2 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_client_release-1.2 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_client_release-1.2 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests-latest-release_client_release-1.2 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-integration-tests-latest-release.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/client-release-1.3.gen.yaml b/prow/jobs/generated/knative/client-release-1.3.gen.yaml new file mode 100644 index 00000000000..1b232f242eb --- /dev/null +++ b/prow/jobs/generated/knative/client-release-1.3.gen.yaml @@ -0,0 +1,268 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: client-continuous + cluster: build-knative + cron: 57 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/client + repo: client + name: continuous_client_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: client-s390x-e2e-tests + cluster: build-knative + cron: 16 10 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/client + repo: client + name: s390x-e2e-tests_client_release-1.3_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: client-release + cluster: build-knative + cron: 39 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/client + repo: client + name: release_client_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/client + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: 
/etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/client: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_client_release-1.3 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_client_release-1.3 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_client_release-1.3 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests-latest-release_client_release-1.3 + path_alias: knative.dev/client + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-integration-tests-latest-release.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/docs-main.gen.yaml b/prow/jobs/generated/knative/docs-main.gen.yaml new file mode 100644 index 00000000000..e6fcacf553f --- /dev/null +++ b/prow/jobs/generated/knative/docs-main.gen.yaml @@ -0,0 +1,106 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: docs + testgrid-tab-name: continuous + cluster: build-knative + cron: 12 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/docs + repo: docs + name: continuous_docs_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /docker-graph + name: docker-graph + - mountPath: /lib/modules + name: modules + - mountPath: /sys/fs/cgroup + name: cgroup + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - emptyDir: {} + name: docker-graph + - hostPath: + path: /lib/modules + type: Directory + name: modules + - hostPath: + path: /sys/fs/cgroup + type: Directory + name: cgroup +presubmits: + knative/docs: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_docs_main + path_alias: knative.dev/docs + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_docs_main + path_alias: knative.dev/docs + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs/generated/knative/eventing-main.gen.yaml b/prow/jobs/generated/knative/eventing-main.gen.yaml new file mode 100644 index 00000000000..ccadc5e460e --- /dev/null +++ b/prow/jobs/generated/knative/eventing-main.gen.yaml @@ -0,0 +1,386 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: eventing + testgrid-tab-name: continuous + cluster: build-knative + cron: 59 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: continuous_eventing_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: eventing + testgrid-tab-name: s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: s390x-e2e-tests_eventing_main_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-eventing + - name: SCALE_CHAOSDUCK_TO_ZERO + value: "1" + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: eventing + testgrid-tab-name: nightly + cluster: build-knative + cron: 9 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: nightly_eventing_main_periodic + reporter_config: + slack: + channel: eventing + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + 
securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: eventing + testgrid-tab-name: release + cluster: build-knative + cron: 41 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: release_eventing_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/eventing + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/eventing: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_eventing_main + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_eventing_main + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 10Gi + requests: + memory: 8Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: conformance-tests_eventing_main + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-conformance-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: reconciler-tests_eventing_main + optional: true + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-rekt-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing_main + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/eventing-release-1.0.gen.yaml b/prow/jobs/generated/knative/eventing-release-1.0.gen.yaml new file mode 100644 index 00000000000..4bf26a94b41 --- /dev/null +++ b/prow/jobs/generated/knative/eventing-release-1.0.gen.yaml @@ -0,0 +1,340 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: eventing-continuous + cluster: build-knative + cron: 45 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: continuous_eventing_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: eventing-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: s390x-e2e-tests_eventing_release-1.0_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-eventing + - name: SCALE_CHAOSDUCK_TO_ZERO + value: "1" + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: 
GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: eventing-release + cluster: build-knative + cron: 15 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: release_eventing_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/eventing: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_eventing_release-1.0 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_eventing_release-1.0 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 10Gi + requests: + memory: 8Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: conformance-tests_eventing_release-1.0 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-conformance-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: reconciler-tests_eventing_release-1.0 + optional: true + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-rekt-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing_release-1.0 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/eventing-release-1.1.gen.yaml b/prow/jobs/generated/knative/eventing-release-1.1.gen.yaml new file mode 100644 index 00000000000..5bb1918667c --- /dev/null +++ b/prow/jobs/generated/knative/eventing-release-1.1.gen.yaml @@ -0,0 +1,340 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: eventing-continuous + cluster: build-knative + cron: 14 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: continuous_eventing_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: eventing-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: s390x-e2e-tests_eventing_release-1.1_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-eventing + - name: SCALE_CHAOSDUCK_TO_ZERO + value: "1" + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: eventing-release + cluster: build-knative + cron: 10 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: release_eventing_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + 
resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/eventing: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_eventing_release-1.1 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_eventing_release-1.1 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 10Gi + requests: + memory: 8Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: conformance-tests_eventing_release-1.1 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-conformance-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: reconciler-tests_eventing_release-1.1 + optional: true + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-rekt-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing_release-1.1 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + 
resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/eventing-release-1.2.gen.yaml b/prow/jobs/generated/knative/eventing-release-1.2.gen.yaml new file mode 100644 index 00000000000..738a47a8b1a --- /dev/null +++ b/prow/jobs/generated/knative/eventing-release-1.2.gen.yaml @@ -0,0 +1,340 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: eventing-continuous + cluster: build-knative + cron: 47 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: continuous_eventing_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: eventing-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: s390x-e2e-tests_eventing_release-1.2_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-eventing + - name: SCALE_CHAOSDUCK_TO_ZERO + value: "1" + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + 
secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: eventing-release + cluster: build-knative + cron: 5 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: release_eventing_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/eventing: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_eventing_release-1.2 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_eventing_release-1.2 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 10Gi + requests: + memory: 8Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: conformance-tests_eventing_release-1.2 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-conformance-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: reconciler-tests_eventing_release-1.2 + optional: true + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-rekt-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: 
+ memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing_release-1.2 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/eventing-release-1.3.gen.yaml b/prow/jobs/generated/knative/eventing-release-1.3.gen.yaml new file mode 100644 index 00000000000..ce2b5661706 --- /dev/null +++ b/prow/jobs/generated/knative/eventing-release-1.3.gen.yaml @@ -0,0 +1,340 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: eventing-continuous + cluster: build-knative + cron: 52 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: continuous_eventing_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: eventing-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: s390x-e2e-tests_eventing_release-1.3_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-eventing + - name: SCALE_CHAOSDUCK_TO_ZERO + value: "1" + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - 
name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: eventing-release + cluster: build-knative + cron: 16 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/eventing + repo: eventing + name: release_eventing_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/eventing: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_eventing_release-1.3 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_eventing_release-1.3 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 10Gi + requests: + memory: 8Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: conformance-tests_eventing_release-1.3 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-conformance-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi 
+ securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: reconciler-tests_eventing_release-1.3 + optional: true + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-rekt-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: upgrade-tests_eventing_release-1.3 + path_alias: knative.dev/eventing + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/hack-main.gen.yaml b/prow/jobs/generated/knative/hack-main.gen.yaml new file mode 100644 index 00000000000..6e5e00596e0 --- /dev/null +++ b/prow/jobs/generated/knative/hack-main.gen.yaml @@ -0,0 +1,117 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +presubmits: + knative/hack: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_hack_main + path_alias: knative.dev/hack + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_hack_main + path_alias: knative.dev/hack + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_hack_main + path_alias: knative.dev/hack + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: kind-tests_hack_main + path_alias: knative.dev/hack + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-kind.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/networking-main.gen.yaml b/prow/jobs/generated/knative/networking-main.gen.yaml new file mode 100644 index 00000000000..2113d55b4f5 --- /dev/null +++ b/prow/jobs/generated/knative/networking-main.gen.yaml @@ -0,0 +1,82 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +presubmits: + knative/networking: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_networking_main + path_alias: knative.dev/networking + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_networking_main + path_alias: knative.dev/networking + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_networking_main + path_alias: knative.dev/networking + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/operator-main.gen.yaml b/prow/jobs/generated/knative/operator-main.gen.yaml new file mode 100644 index 00000000000..3d549f32634 --- /dev/null +++ b/prow/jobs/generated/knative/operator-main.gen.yaml @@ -0,0 +1,380 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: operator + testgrid-tab-name: continuous + cluster: build-knative + cron: 55 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/operator + repo: operator + name: continuous_operator_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: operator + testgrid-tab-name: s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/operator + repo: operator + name: s390x-e2e-tests_operator_main_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: operator + testgrid-tab-name: nightly + cluster: build-knative + cron: 5 9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/operator + repo: operator + name: nightly_operator_main_periodic + reporter_config: + slack: + channel: operator + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: 
nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: operator + testgrid-tab-name: release + cluster: build-knative + cron: 45 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/operator + repo: operator + name: release_operator_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/operator + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/operator: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_operator_main + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_operator_main + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_operator_main + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: upgrade-tests_operator_main + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + 
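+# Note on the two presubmit shapes used throughout these generated files: a job
+# either passes a built-in suite flag to ./test/presubmit-tests.sh
+# (--build-tests, --unit-tests, --integration-tests) or passes
+# --run-test <script> to delegate to a repo-specific suite, as the upgrade
+# jobs around this entry do. In both shapes runner.sh is the container
+# entrypoint wrapper that runs the given command inside the prow-tests image.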
cluster: build-knative + decorate: true + name: serving-upgrade-tests_operator_main + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-serving-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: eventing-upgrade-tests_operator_main + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-eventing-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/operator-release-1.0.gen.yaml b/prow/jobs/generated/knative/operator-release-1.0.gen.yaml new file mode 100644 index 00000000000..980e4827774 --- /dev/null +++ b/prow/jobs/generated/knative/operator-release-1.0.gen.yaml @@ -0,0 +1,338 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: operator-continuous + cluster: build-knative + cron: 9 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/operator + repo: operator + name: continuous_operator_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: operator-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/operator + repo: operator + name: s390x-e2e-tests_operator_release-1.0_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.0 + testgrid-tab-name: operator-release + cluster: build-knative + cron: 39 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.0 + org: knative + path_alias: knative.dev/operator + repo: operator + name: release_operator_release-1.0_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/operator + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + 
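+# Note on the ./hack/release.sh flags used by the release periodics in these
+# files (roughly, as read from the invocations themselves): --dot-release cuts
+# a patch release from the branch named by --branch on a weekly cron;
+# --auto-release, used on the main branch, decides on its own whether a new
+# release is due; the nightly jobs instead pass --publish --tag-release.
+# --release-gcs / --release-gcr name the GCS bucket and image repo that
+# receive the artifacts, and --github-token points at the mounted hub-token
+# secret.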
privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/operator: + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: build-tests_operator_release-1.0 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: unit-tests_operator_release-1.0 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: integration-tests_operator_release-1.0 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: upgrade-tests_operator_release-1.0 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: build-knative + decorate: true + name: serving-upgrade-tests_operator_release-1.0 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-serving-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.0$ + cluster: 
build-knative + decorate: true + name: eventing-upgrade-tests_operator_release-1.0 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-eventing-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/operator-release-1.1.gen.yaml b/prow/jobs/generated/knative/operator-release-1.1.gen.yaml new file mode 100644 index 00000000000..9a73f5f281b --- /dev/null +++ b/prow/jobs/generated/knative/operator-release-1.1.gen.yaml @@ -0,0 +1,338 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: operator-continuous + cluster: build-knative + cron: 22 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/operator + repo: operator + name: continuous_operator_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: operator-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/operator + repo: operator + name: s390x-e2e-tests_operator_release-1.1_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.1 + testgrid-tab-name: operator-release + cluster: build-knative + cron: 42 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.1 + org: knative + path_alias: knative.dev/operator + repo: operator + name: release_operator_release-1.1_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/operator + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/operator: + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: build-tests_operator_release-1.1 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: unit-tests_operator_release-1.1 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: integration-tests_operator_release-1.1 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: upgrade-tests_operator_release-1.1 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: 
GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: serving-upgrade-tests_operator_release-1.1 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-serving-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.1$ + cluster: build-knative + decorate: true + name: eventing-upgrade-tests_operator_release-1.1 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-eventing-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/operator-release-1.2.gen.yaml b/prow/jobs/generated/knative/operator-release-1.2.gen.yaml new file mode 100644 index 00000000000..b2143c821f9 --- /dev/null +++ b/prow/jobs/generated/knative/operator-release-1.2.gen.yaml @@ -0,0 +1,338 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: operator-continuous + cluster: build-knative + cron: 51 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/operator + repo: operator + name: continuous_operator_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: operator-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/operator + repo: operator + name: s390x-e2e-tests_operator_release-1.2_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.2 + testgrid-tab-name: operator-release + cluster: build-knative + cron: 9 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.2 + org: knative + path_alias: knative.dev/operator + repo: operator + name: release_operator_release-1.2_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/operator + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + 
privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/operator: + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: build-tests_operator_release-1.2 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: unit-tests_operator_release-1.2 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: integration-tests_operator_release-1.2 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: upgrade-tests_operator_release-1.2 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: build-knative + decorate: true + name: serving-upgrade-tests_operator_release-1.2 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-serving-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.2$ + cluster: 
build-knative + decorate: true + name: eventing-upgrade-tests_operator_release-1.2 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-eventing-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/operator-release-1.3.gen.yaml b/prow/jobs/generated/knative/operator-release-1.3.gen.yaml new file mode 100644 index 00000000000..db270a9c93f --- /dev/null +++ b/prow/jobs/generated/knative/operator-release-1.3.gen.yaml @@ -0,0 +1,338 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: operator-continuous + cluster: build-knative + cron: 0 8 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/operator + repo: operator + name: continuous_operator_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: operator-s390x-e2e-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/operator + repo: operator + name: s390x-e2e-tests_operator_release-1.3_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests" + command: + - runner.sh + env: + - name: INGRESS_CLASS + value: contour.ingress.networking.knative.dev + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: 
gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: knative-release-1.3 + testgrid-tab-name: operator-release + cluster: build-knative + cron: 20 9 * * 2 + decorate: true + extra_refs: + - base_ref: release-1.3 + org: knative + path_alias: knative.dev/operator + repo: operator + name: release_operator_release-1.3_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/operator + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/operator: + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: build-tests_operator_release-1.3 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: unit-tests_operator_release-1.3 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: integration-tests_operator_release-1.3 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: upgrade-tests_operator_release-1.3 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: 
GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: serving-upgrade-tests_operator_release-1.3 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-serving-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: true + branches: + - ^release-1.3$ + cluster: build-knative + decorate: true + name: eventing-upgrade-tests_operator_release-1.3 + path_alias: knative.dev/operator + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-eventing-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/pkg-main.gen.yaml b/prow/jobs/generated/knative/pkg-main.gen.yaml new file mode 100644 index 00000000000..e76bce0aa8b --- /dev/null +++ b/prow/jobs/generated/knative/pkg-main.gen.yaml @@ -0,0 +1,82 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +presubmits: + knative/pkg: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_pkg_main + path_alias: knative.dev/pkg + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_pkg_main + path_alias: knative.dev/pkg + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: integration-tests_pkg_main + path_alias: knative.dev/pkg + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/serving-main.gen.yaml b/prow/jobs/generated/knative/serving-main.gen.yaml new file mode 100644 index 00000000000..588f04464a1 --- /dev/null +++ b/prow/jobs/generated/knative/serving-main.gen.yaml @@ -0,0 +1,1225 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### +# #### #### +# ####################################################################### + +periodics: +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: continuous + cluster: build-knative + cron: 45 */12 * * * + decorate: true + decoration_config: + timeout: 3h0m0s + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: continuous_serving_main_periodic + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: istio-latest-mesh + cluster: build-knative + cron: 36 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: istio-latest-mesh_serving_main_periodic + spec: + containers: + - args: + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + - --run-test + - ./test/e2e-auto-tls-tests.sh --istio-version latest --mesh + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: istio-latest-no-mesh + cluster: build-knative + cron: 40 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: istio-latest-no-mesh_serving_main_periodic + spec: + containers: + - args: + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --no-mesh + - --run-test + - ./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: istio-head-mesh + cluster: build-knative + cron: 27 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: istio-head-mesh_serving_main_periodic + spec: + containers: + - args: + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version head --mesh + - --run-test + - 
./test/e2e-auto-tls-tests.sh --istio-version head --mesh + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: istio-head-no-mesh + cluster: build-knative + cron: 37 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: istio-head-no-mesh_serving_main_periodic + spec: + containers: + - args: + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version head --no-mesh + - --run-test + - ./test/e2e-auto-tls-tests.sh --istio-version head --no-mesh + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: kourier-stable + cluster: build-knative + cron: 7 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: kourier-stable_serving_main_periodic + spec: + containers: + - args: + - --run-test + - ./test/e2e-tests.sh --kourier-version stable + - --run-test + - ./test/e2e-auto-tls-tests.sh --kourier-version stable --run-http01-auto-tls-tests + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: contour-latest + cluster: build-knative + cron: 28 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: contour-latest_serving_main_periodic + spec: + containers: + - args: + - --run-test + - ./test/e2e-tests.sh --contour-version latest + - --run-test + - ./test/e2e-auto-tls-tests.sh --contour-version latest --run-http01-auto-tls-tests + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + 
readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: gateway-api-latest + cluster: build-knative + cron: 47 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: gateway-api-latest_serving_main_periodic + spec: + containers: + - args: + - --run-test + - ./test/e2e-tests.sh --gateway-api-version latest + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: https + cluster: build-knative + cron: 11 */9 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: https_serving_main_periodic + spec: + containers: + - args: + - --run-test + - ./test/e2e-tests.sh --https + - --run-test + - ./test/e2e-auto-tls-tests.sh --https + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: s390x-kourier-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: s390x-kourier-tests_serving_main_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-serving + - name: TEST_OPTIONS + value: --enable-alpha --enable-beta --resolvabledomain=false + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true 
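+# These s390x jobs attach to a pre-provisioned s390x cluster instead of
+# creating one. The quoted bash -c one-liner above, unpacked here only for
+# readability (same steps, same order):
+#   mkdir -p /root/.kube
+#   cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh
+#   server_addr=$(./connect.sh kourier-main)   # fetches kubeconfig, prints endpoint
+#   kubectl get cm s390x-config-serving -n default \
+#     -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh
+#   export TEST_OPTIONS="$TEST_OPTIONS --ingressendpoint ${server_addr}"
+#   ./test/e2e-tests.sh --run-tests --kourier-version latest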
+ volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: s390x-contour-tests + cluster: build-knative + cron: 0 5 * * * + decorate: true + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: s390x-contour-tests_serving_main_periodic + spec: + containers: + - args: + - bash + - -c + - | + "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest" + command: + - runner.sh + env: + - name: SYSTEM_NAMESPACE + value: knative-serving + - name: TEST_OPTIONS + value: --enable-alpha --enable-beta --resolvabledomain=false + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + - name: KO_FLAGS + value: --platform=linux/s390x + - name: PLATFORM + value: linux/s390x + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + key: ko-docker-repo + name: s390x-cluster1 + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + - mountPath: /opt/cluster + name: s390x-cluster1 + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - name: s390x-cluster1 + secret: + defaultMode: 384 + secretName: s390x-cluster1 +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: nightly + cluster: build-knative + cron: 15 9 * * * + decorate: true + decoration_config: + timeout: 3h0m0s + extra_refs: + - base_ref: main + org: knative + path_alias: knative.dev/serving + repo: serving + name: nightly_serving_main_periodic + reporter_config: + slack: + channel: serving + job_states_to_report: + - failure + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --publish + - --tag-release + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nightly-account + name: nightly-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: nightly-account + secret: + secretName: nightly-account +- annotations: + testgrid-dashboards: serving + testgrid-tab-name: release + cluster: build-knative + cron: 11 */12 * * * + decorate: true + decoration_config: + timeout: 3h0m0s + extra_refs: + - base_ref: main + org: knative + 
path_alias: knative.dev/serving + repo: serving + name: release_serving_main_periodic + spec: + containers: + - command: + - runner.sh + - ./hack/release.sh + - --auto-release + - --release-gcs + - knative-releases/serving + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/hub-token + name: hub-token + readOnly: true + - mountPath: /etc/release-account + name: release-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account +presubmits: + knative/serving: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_serving_main + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_serving_main + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 10Gi + requests: + memory: 8Gi + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: upgrade-tests_serving_main + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: performance-tests-kperf_serving_main + optional: true + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/performance/performance-tests.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + 
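The presubmit names that follow all fit one pattern, also visible in the periodics above. A sketch of the convention, inferred from the generated names rather than taken from the generator's source:

```go
package main

import "fmt"

// jobName mirrors the naming pattern in these generated files:
// presubmits are "<job>_<repo>_<branch>"; periodics append "_periodic".
func jobName(job, repo, branch string, periodic bool) string {
	name := fmt.Sprintf("%s_%s_%s", job, repo, branch)
	if periodic {
		name += "_periodic"
	}
	return name
}

func main() {
	fmt.Println(jobName("build-tests", "serving", "main", false)) // build-tests_serving_main
	fmt.Println(jobName("nightly", "serving", "main", true))      // nightly_serving_main_periodic
}
```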
name: istio-latest-mesh_serving_main + optional: true + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: istio-latest-mesh-short_serving_main + optional: true + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh --short + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: istio-latest-mesh-tls_serving_main + optional: true + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-auto-tls-tests.sh --istio-version latest --mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: istio-latest-no-mesh_serving_main + optional: true + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --no-mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: istio-latest-no-mesh-tls_serving_main + optional: true + path_alias: knative.dev/serving + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - 
--run-test + - ./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: kourier-stable_serving_main + optional: true + path_alias: knative.dev/serving + run_if_changed: ^third_party/kourier-latest/* + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kourier-version stable + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: kourier-stable-tls_serving_main + optional: true + path_alias: knative.dev/serving + run_if_changed: ^third_party/kourier-latest/* + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-auto-tls-tests.sh --kourier-version stable + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: contour-latest_serving_main + optional: true + path_alias: knative.dev/serving + run_if_changed: ^third_party/contour-latest/* + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --contour-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: contour-tls_serving_main + optional: true + path_alias: knative.dev/serving + run_if_changed: ^third_party/contour-latest/* + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - 
./test/e2e-auto-tls-tests.sh --contour-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: gateway-api-latest_serving_main + optional: true + path_alias: knative.dev/serving + run_if_changed: ^third_party/gateway-api-latest/* + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --gateway-api-version latest + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account + - always_run: false + branches: + - ^main$ + cluster: build-knative + decorate: true + name: https_serving_main + optional: true + path_alias: knative.dev/serving + run_if_changed: ^third_party/cert-manager-latest/* + spec: + containers: + - args: + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --https + - --run-test + - ./test/e2e-auto-tls-tests.sh --https + command: + - runner.sh + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: + limits: + memory: 16Gi + requests: + memory: 12Gi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/test-account + name: test-account + readOnly: true + nodeSelector: + type: testing + volumes: + - name: test-account + secret: + secretName: test-account diff --git a/prow/jobs/generated/knative/test-infra-main.gen.yaml b/prow/jobs/generated/knative/test-infra-main.gen.yaml new file mode 100644 index 00000000000..33291fc4d4e --- /dev/null +++ b/prow/jobs/generated/knative/test-infra-main.gen.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### +# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
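Several of the optional presubmits above gate on `run_if_changed`, an RE2 pattern Prow matches against each changed file path. One subtlety, shown in a small sketch: the trailing `/*` in patterns like `^third_party/cert-manager-latest/*` technically means "zero or more slashes", not "anything below", but because the match is unanchored at the end, any path under that directory still triggers the job:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^third_party/cert-manager-latest/*`)
	for _, p := range []string{
		"third_party/cert-manager-latest/cert-manager.yaml", // matches
		"pkg/reconciler/route/route.go",                     // does not
	} {
		fmt.Println(p, "->", re.MatchString(p))
	}
}
```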
#### +# #### #### +# ####################################################################### + +presubmits: + knative/test-infra: + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: build-tests_test-infra_main + path_alias: knative.dev/test-infra + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing + - always_run: true + branches: + - ^main$ + cluster: build-knative + decorate: true + name: unit-tests_test-infra_main + path_alias: knative.dev/test-infra + spec: + containers: + - command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + image: gcr.io/knative-tests/test-infra/prow-tests:stable + name: "" + resources: {} + securityContext: + privileged: true + nodeSelector: + type: testing diff --git a/prow/jobs_config/.base.yaml b/prow/jobs_config/.base.yaml new file mode 100644 index 00000000000..95c3aaa3c69 --- /dev/null +++ b/prow/jobs_config/.base.yaml @@ -0,0 +1,114 @@ +autogen_header: | + # ####################################################################### + # #### #### + # #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### + # #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. #### + # #### #### + # ####################################################################### + +path_aliases: + knative: knative.dev + +node_selector: + type: testing + +cluster: "build-knative" + +requirements: [gcp] +requirement_presets: + nightly: + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/nightly-account/service-account.json + volumeMounts: + - name: nightly-account + mountPath: /etc/nightly-account + readOnly: true + volumes: + - name: nightly-account + secret: + secretName: nightly-account + + release: + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/release-account/service-account.json + volumeMounts: + - name: hub-token + mountPath: /etc/hub-token + readOnly: true + - name: release-account + mountPath: /etc/release-account + readOnly: true + volumes: + - name: hub-token + secret: + secretName: hub-token + - name: release-account + secret: + secretName: release-account + + docker: + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + volumeMounts: + - name: docker-graph + mountPath: /docker-graph + - name: modules + mountPath: /lib/modules + - name: cgroup + mountPath: /sys/fs/cgroup + volumes: + - name: docker-graph + emptyDir: {} + - name: modules + hostPath: + path: /lib/modules + type: Directory + - name: cgroup + hostPath: + path: /sys/fs/cgroup + type: Directory + + gcp: + env: + - name: E2E_CLUSTER_REGION + value: us-central1 + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/test-account/service-account.json + volumeMounts: + - name: test-account + mountPath: /etc/test-account + readOnly: true + volumes: + - name: test-account + secret: + secretName: test-account + + s390x: + env: + - name: KO_FLAGS + value: "--platform=linux/s390x" + - name: PLATFORM + value: "linux/s390x" + - name: KO_DOCKER_REPO + valueFrom: + secretKeyRef: + name: s390x-cluster1 + key: ko-docker-repo + - name: DISABLE_MD_LINTING + value: "1" + - name: KUBECONFIG + value: /root/.kube/config + - name: DOCKER_CONFIG + value: /opt/cluster + volumeMounts: + - name: s390x-cluster1 + mountPath: /opt/cluster + readOnly: true + volumes: + - name: s390x-cluster1 + secret: + secretName: s390x-cluster1 + defaultMode: 
0600 diff --git a/prow/jobs_config/jobs_test.go b/prow/jobs_config/jobs_test.go new file mode 100644 index 00000000000..b65d8dc0cbc --- /dev/null +++ b/prow/jobs_config/jobs_test.go @@ -0,0 +1,151 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// jobs_test.go runs basic validations for the meta Prow job config files. + +package jobs_config + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "istio.io/test-infra/tools/prowgen/pkg/spec" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/yaml" +) + +const jobsConfigPath = "." + +func TestOrgRepo(t *testing.T) { + var errStrs strings.Builder + if err := filepath.WalkDir(jobsConfigPath, func(path string, d os.DirEntry, err error) error { + t.Logf("Validating org and repo for %q", path) + // Skip directory, base config file and other unrelated files. + if d.IsDir() || d.Name() == ".base.yaml" || !strings.HasSuffix(path, ".yaml") { + return nil + } + + jobs := mustReadJobsConfig(t, path) + + org := jobs.Org + parentDir := filepath.Base(filepath.Dir(path)) + if parentDir != org { + errStrs.WriteString(fmt.Sprintf("Config file %q must be under %q folder.\n", path, org)) + } + + if len(jobs.Branches) != 1 { + errStrs.WriteString(fmt.Sprintf("Config file %q must only have one branch configured but got %v.\n", path, jobs.Branches)) + } + + repo := jobs.Repo + branch := jobs.Branches[0] + repoBranch := repo + if branch != "main" { + repoBranch = repo + "-" + branch + } + if strings.TrimSuffix(d.Name(), ".yaml") != repoBranch { + errStrs.WriteString(fmt.Sprintf("Config file %q must be named as %q.\n", path, repoBranch+".yaml")) + } + + return nil + }); err != nil { + t.Fatalf("Error walking dir %q: %v", jobsConfigPath, err) + } + + if errStrs.Len() != 0 { + t.Fatalf("Error validating org and repo:\n%s", errStrs.String()) + } +} + +func TestReleaseJobs(t *testing.T) { + var errStrs strings.Builder + if err := filepath.WalkDir(jobsConfigPath, func(path string, d os.DirEntry, err error) error { + t.Logf("Validating release jobs for %q", path) + // Skip directory, base config file and other unrelated files. + if d.IsDir() || d.Name() == ".base.yaml" || !strings.HasSuffix(path, ".yaml") { + return nil + } + + jobs := mustReadJobsConfig(t, path) + + for _, job := range jobs.Jobs { + if job.Name == "nightly" { + if job.Interval != "" || job.Cron != "" { + errStrs.WriteString(fmt.Sprintf("cron is supposed to be auto-generated, do not add for nightly job in %q\n", path)) + } + reqs := sets.NewString(job.Requirements...) + if !reqs.Has("nightly") { + errStrs.WriteString(fmt.Sprintf("nightly requirement is required for nightly job in %q\n", path)) + } + + excludedReqs := sets.NewString(job.ExcludedRequirements...) 
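+				// The gcp preset is applied to every job by default (.base.yaml
+				// sets requirements: [gcp]), so nightly jobs must explicitly
+				// exclude it and rely on the nightly-account preset instead.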
+ if !excludedReqs.Has("gcp") { + errStrs.WriteString(fmt.Sprintf("gcp requirement cannot be set for nightly job in %q\n", path)) + } + } + + if job.Name == "release" { + if job.Interval != "" || job.Cron != "" { + errStrs.WriteString(fmt.Sprintf("cron is supposed to be auto-generated, do not add it for release job in %q\n", path)) + } + reqs := sets.NewString(job.Requirements...) + if !reqs.Has("release") { + errStrs.WriteString(fmt.Sprintf("release requirement is required for release job in %q\n", path)) + } + + excludedReqs := sets.NewString(job.ExcludedRequirements...) + if !excludedReqs.Has("gcp") { + errStrs.WriteString(fmt.Sprintf("gcp requirement cannot be set for release job in %q\n", path)) + } + + commandArgs := append(job.Command, job.Args...) + for i, arg := range commandArgs { + if arg == "--release-gcs" { + if commandArgs[i+1] != "knative-releases/"+jobs.Repo { + errStrs.WriteString(fmt.Sprintf("--release-gcs must be set to %q for release job in %q\n", "knative-releases/"+jobs.Repo, path)) + } + } + } + } + } + + return nil + }); err != nil { + t.Fatalf("Error walking dir %q: %v", jobsConfigPath, err) + } + + if errStrs.Len() != 0 { + t.Fatalf("Error validating release jobs:\n%s", errStrs.String()) + } +} + +func mustReadJobsConfig(t *testing.T, file string) spec.JobsConfig { + t.Helper() + yamlFile, err := ioutil.ReadFile(file) + if err != nil { + t.Fatalf("Failed to read %q: %v", file, err) + } + jobsConfig := spec.JobsConfig{} + if err := yaml.Unmarshal(yamlFile, &jobsConfig); err != nil { + t.Fatalf("Failed to unmarshal %q: %v", file, err) + } + + return jobsConfig +} diff --git a/prow/jobs_config/knative-sandbox/async-component.yaml b/prow/jobs_config/knative-sandbox/async-component.yaml new file mode 100644 index 00000000000..5c38e7ec150 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/async-component.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: async-component +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/async-component, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/container-freezer.yaml b/prow/jobs_config/knative-sandbox/container-freezer.yaml new file mode 100644 index 00000000000..ee0dda83729 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/container-freezer.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: container-freezer +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: 
[gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/container-freezer, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/discovery.yaml b/prow/jobs_config/knative-sandbox/discovery.yaml new file mode 100644 index 00000000000..6560d9a067d --- /dev/null +++ b/prow/jobs_config/knative-sandbox/discovery.yaml @@ -0,0 +1,20 @@ +org: knative-sandbox +repo: discovery +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] diff --git a/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.0.yaml new file mode 100644 index 00000000000..96b32dac9e3 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.0.yaml @@ -0,0 +1,58 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
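The per-repo "meta" configs above (e.g. `discovery.yaml`) follow the `spec.JobsConfig` schema from istio.io/test-infra's prowgen package, the same type `jobs_test.go` unmarshals. A minimal loader sketch, using only fields the test file itself exercises; running it assumes those modules are available:

```go
package main

import (
	"fmt"
	"os"

	"istio.io/test-infra/tools/prowgen/pkg/spec"
	"sigs.k8s.io/yaml"
)

func main() {
	// Usage: go run . prow/jobs_config/knative-sandbox/discovery.yaml
	raw, err := os.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}
	var jc spec.JobsConfig
	if err := yaml.Unmarshal(raw, &jc); err != nil {
		panic(err)
	}
	fmt.Printf("%s/%s @ %v\n", jc.Org, jc.Repo, jc.Branches)
	for _, j := range jc.Jobs {
		fmt.Printf("  %-20s %v\n", j.Name, j.Command)
	}
}
```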
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + modifiers: + - presubmit_optional + name: integration-test-kafka-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + modifiers: + - presubmit_optional + name: integration-test-kafka-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-autoscaler-keda diff --git a/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.1.yaml new file mode 100644 index 00000000000..de188bc1cef --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.1.yaml @@ -0,0 +1,58 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + modifiers: + - presubmit_optional + name: integration-test-kafka-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + modifiers: + - presubmit_optional + name: integration-test-kafka-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-autoscaler-keda diff --git a/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.2.yaml new file mode 100644 index 00000000000..d7da458ea33 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.2.yaml @@ -0,0 +1,58 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + modifiers: + - presubmit_optional + name: integration-test-kafka-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + modifiers: + - presubmit_optional + name: integration-test-kafka-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-autoscaler-keda diff --git a/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.3.yaml new file mode 100644 index 00000000000..1d424991c4c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda-release-1.3.yaml @@ -0,0 +1,58 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-source + modifiers: + - presubmit_optional + name: integration-test-kafka-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --kafka-mt-source + modifiers: + - presubmit_optional + name: integration-test-kafka-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-autoscaler-keda + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-autoscaler-keda diff --git a/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda.yaml b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda.yaml new file mode 100644 index 00000000000..a9dd56715e0 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-autoscaler-keda.yaml @@ -0,0 +1,32 @@ +org: knative-sandbox +repo: eventing-autoscaler-keda +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: integration-test-kafka-source + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --kafka-source"] + modifiers: [presubmit_optional] + + - name: integration-test-kafka-mt-source + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, 
"./test/e2e-tests.sh --kafka-mt-source"] + modifiers: [presubmit_optional] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-autoscaler-keda, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.0.yaml new file mode 100644 index 00000000000..0893959e44f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.0.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-awssqs + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-awssqs diff --git a/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.1.yaml new file mode 100644 index 00000000000..33487cc521c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.1.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-awssqs + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-awssqs diff --git a/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.2.yaml new file mode 100644 index 00000000000..01b82bec27e --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-awssqs-release-1.2.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-awssqs + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-awssqs diff --git a/prow/jobs_config/knative-sandbox/eventing-awssqs.yaml b/prow/jobs_config/knative-sandbox/eventing-awssqs.yaml new file mode 100644 index 00000000000..b5b3361655f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-awssqs.yaml @@ -0,0 +1,22 @@ +org: knative-sandbox +repo: eventing-awssqs +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-awssqs, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.0.yaml new file mode 100644 index 00000000000..f490f163a7a --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.0.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-ceph diff --git a/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.1.yaml new file mode 100644 index 00000000000..95b09661276 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.1.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
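The `requirements: [docker]` entries above pull in the docker preset from `.base.yaml` (the DinD env var plus the docker-graph/modules/cgroup volumes), and `excluded_requirements` strips defaults back out. A loose sketch of that merge, with types trimmed to the fields used; this illustrates the preset semantics inferred from these files, not the generator's implementation:

```go
package main

import "fmt"

type EnvVar struct{ Name, Value string }

// preset carries just the parts of a requirement preset modeled here.
type preset struct {
	Env     []EnvVar
	Volumes []string
}

// applyPresets folds presets into a job: defaults first, minus
// exclusions, plus the job's own requirements.
func applyPresets(presets map[string]preset, defaults, reqs, excluded []string) preset {
	skip := map[string]bool{}
	for _, e := range excluded {
		skip[e] = true
	}
	var out preset
	names := append(append([]string{}, defaults...), reqs...)
	for _, n := range names {
		if skip[n] {
			continue
		}
		p := presets[n]
		out.Env = append(out.Env, p.Env...)
		out.Volumes = append(out.Volumes, p.Volumes...)
	}
	return out
}

func main() {
	presets := map[string]preset{
		"gcp":    {Env: []EnvVar{{"GOOGLE_APPLICATION_CREDENTIALS", "/etc/test-account/service-account.json"}}, Volumes: []string{"test-account"}},
		"docker": {Env: []EnvVar{{"DOCKER_IN_DOCKER_ENABLED", "true"}}, Volumes: []string{"docker-graph", "modules", "cgroup"}},
	}
	// eventing-ceph continuous: requirements [docker], default [gcp], no exclusions.
	fmt.Printf("%+v\n", applyPresets(presets, []string{"gcp"}, []string{"docker"}, nil))
	// eventing-ceph release: excluded [gcp, docker] leaves neither preset applied.
	fmt.Printf("%+v\n", applyPresets(presets, []string{"gcp"}, nil, []string{"gcp", "docker"}))
}
```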
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-ceph diff --git a/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.2.yaml new file mode 100644 index 00000000000..c8f07488c34 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.2.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-ceph diff --git a/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.3.yaml new file mode 100644 index 00000000000..0f5d64254f1 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-ceph-release-1.3.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-ceph + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-ceph diff --git a/prow/jobs_config/knative-sandbox/eventing-ceph.yaml b/prow/jobs_config/knative-sandbox/eventing-ceph.yaml new file mode 100644 index 00000000000..5cbb4873c3b --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-ceph.yaml @@ -0,0 +1,23 @@ +org: knative-sandbox +repo: eventing-ceph +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-ceph, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-couchdb-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-couchdb-release-1.0.yaml new file mode 100644 index 00000000000..30f64a70fdc --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-couchdb-release-1.0.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-couchdb + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-couchdb diff --git a/prow/jobs_config/knative-sandbox/eventing-couchdb-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-couchdb-release-1.1.yaml new file mode 100644 index 00000000000..4d3eb78266d --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-couchdb-release-1.1.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-couchdb + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-couchdb diff --git a/prow/jobs_config/knative-sandbox/eventing-couchdb.yaml b/prow/jobs_config/knative-sandbox/eventing-couchdb.yaml new file mode 100644 index 00000000000..e18b70a7621 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-couchdb.yaml @@ -0,0 +1,23 @@ +org: knative-sandbox +repo: eventing-couchdb +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-couchdb, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-github-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-github-release-1.0.yaml new file mode 100644 index 00000000000..db8dc80b9f9 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-github-release-1.0.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-github + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-github diff --git a/prow/jobs_config/knative-sandbox/eventing-github-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-github-release-1.1.yaml new file mode 100644 index 00000000000..8d7d35f8865 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-github-release-1.1.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-github + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-github diff --git a/prow/jobs_config/knative-sandbox/eventing-github-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-github-release-1.2.yaml new file mode 100644 index 00000000000..ca97c8cd8ed --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-github-release-1.2.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-github + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-github diff --git a/prow/jobs_config/knative-sandbox/eventing-github-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-github-release-1.3.yaml new file mode 100644 index 00000000000..ede2ba97a39 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-github-release-1.3.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-github + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-github diff --git a/prow/jobs_config/knative-sandbox/eventing-github.yaml b/prow/jobs_config/knative-sandbox/eventing-github.yaml new file mode 100644 index 00000000000..610fd6497cb --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-github.yaml @@ -0,0 +1,23 @@ +org: knative-sandbox +repo: eventing-github +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-github, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.0.yaml new file mode 100644 index 00000000000..31387ecb384 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.0.yaml @@ -0,0 +1,47 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-gitlab + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-gitlab +resources_presets: + default: + limits: + memory: 16Gi + requests: + memory: 12Gi diff --git a/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.1.yaml new file mode 100644 index 00000000000..cea210ec165 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.1.yaml @@ -0,0 +1,47 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
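The `resources_presets` block in the eventing-gitlab configs (a `default` preset of 16Gi limit / 12Gi request) appears to be the source of the matching `resources` sections in the generated jobs earlier in this diff. A quick sanity check using the Kubernetes quantity type, assuming the apimachinery module is available (it already appears in `jobs_test.go`'s imports):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Requests must not exceed limits for the pod spec to be sensible.
	limits := resource.MustParse("16Gi")
	requests := resource.MustParse("12Gi")
	fmt.Println("requests <= limits:", requests.Cmp(limits) <= 0)
}
```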
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-gitlab + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-gitlab +resources_presets: + default: + limits: + memory: 16Gi + requests: + memory: 12Gi diff --git a/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.2.yaml new file mode 100644 index 00000000000..b753d57ce72 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.2.yaml @@ -0,0 +1,47 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-gitlab + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-gitlab +resources_presets: + default: + limits: + memory: 16Gi + requests: + memory: 12Gi diff --git a/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.3.yaml new file mode 100644 index 00000000000..5cd17658417 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-gitlab-release-1.3.yaml @@ -0,0 +1,47 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-gitlab + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-gitlab +resources_presets: + default: + limits: + memory: 16Gi + requests: + memory: 12Gi diff --git a/prow/jobs_config/knative-sandbox/eventing-gitlab.yaml b/prow/jobs_config/knative-sandbox/eventing-gitlab.yaml new file mode 100644 index 00000000000..0f004257716 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-gitlab.yaml @@ -0,0 +1,30 @@ +org: knative-sandbox +repo: eventing-gitlab +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-gitlab, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] + +resources_presets: + default: + limits: + memory: 16Gi + requests: + memory: 12Gi diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.0.yaml new file mode 100644 index 00000000000..be2d1a3daf5 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.0.yaml @@ -0,0 +1,210 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + excluded_requirements: + - gcp + name: integration-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + name: reconciler-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-integration-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-integration-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-integration-tests-sasl-plain + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-reconciler-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-reconciler-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-reconciler-tests-sasl-plain + requirements: + - docker + types: + - 
presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka-broker + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka-broker diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.1.yaml new file mode 100644 index 00000000000..2862ae7fade --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.1.yaml @@ -0,0 +1,210 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + excluded_requirements: + - gcp + name: integration-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + name: reconciler-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-integration-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-integration-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- 
command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-integration-tests-sasl-plain + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-reconciler-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-reconciler-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-reconciler-tests-sasl-plain + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka-broker + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka-broker diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.2.yaml new file mode 100644 index 00000000000..b4aaf1a98b6 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.2.yaml @@ -0,0 +1,210 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + excluded_requirements: + - gcp + name: integration-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + name: reconciler-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-integration-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-integration-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-integration-tests-sasl-plain + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-reconciler-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-reconciler-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-reconciler-tests-sasl-plain + requirements: + - docker + types: + - 
presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka-broker + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka-broker diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.3.yaml new file mode 100644 index 00000000000..0b818cff7f0 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-broker-release-1.3.yaml @@ -0,0 +1,210 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + excluded_requirements: + - gcp + name: integration-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + name: reconciler-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-integration-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-integration-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- 
command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-integration-tests-sasl-plain + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + name: channel-reconciler-tests-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + name: channel-reconciler-tests-sasl-ssl + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/reconciler-tests.sh + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + name: channel-reconciler-tests-sasl-plain + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka-broker + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka-broker diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-broker.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-broker.yaml new file mode 100644 index 00000000000..830adcd5656 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-broker.yaml @@ -0,0 +1,125 @@ +org: knative-sandbox +repo: eventing-kafka-broker +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + excluded_requirements: [gcp] + requirements: [docker] + + - name: integration-test-channel-consolidated + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --consolidated"] + + - name: integration-test-channel-consolidated-tls + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --consolidated-tls"] + + - name: integration-test-channel-consolidated-sasl + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --consolidated-sasl"] + + - name: integration-test-channel-distributed + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --distributed"] + + - name: integration-test-mt-source + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --mt-source"] + modifiers: [presubmit_optional] + + - name: upgrade-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, 
"./test/e2e-upgrade-tests.sh"] + requirements: [docker] + + - name: reconciler-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/reconciler-tests.sh"] + requirements: [docker] + + - name: channel-integration-tests-ssl + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh"] + requirements: [docker] + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + + - name: channel-integration-tests-sasl-ssl + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh"] + requirements: [docker] + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + + - name: channel-integration-tests-sasl-plain + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh"] + requirements: [docker] + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + + - name: channel-reconciler-tests-ssl + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/reconciler-tests.sh"] + requirements: [docker] + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SSL + + - name: channel-reconciler-tests-sasl-ssl + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/reconciler-tests.sh"] + requirements: [docker] + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_SSL + + - name: channel-reconciler-tests-sasl-plain + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/reconciler-tests.sh"] + requirements: [docker] + env: + - name: EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO + value: SASL_PLAIN + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: eventing-kafka + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-kafka-broker, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.0.yaml new file mode 100644 index 00000000000..100fbc30425 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.0.yaml @@ -0,0 +1,113 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.1.yaml new file mode 100644 index 00000000000..6099018d837 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.1.yaml @@ -0,0 +1,113 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.2.yaml new file mode 100644 index 00000000000..c5e9ff3ef38 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.2.yaml @@ -0,0 +1,113 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.3.yaml new file mode 100644 index 00000000000..acc301cf4bf --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka-release-1.3.yaml @@ -0,0 +1,113 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + requirements: + - docker + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated + name: integration-test-channel-consolidated + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-tls + name: integration-test-channel-consolidated-tls + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --consolidated-sasl + name: integration-test-channel-consolidated-sasl + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --distributed + name: integration-test-channel-distributed + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --mt-source + modifiers: + - presubmit_optional + name: integration-test-mt-source + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-upgrade-tests.sh + name: upgrade-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-kafka diff --git a/prow/jobs_config/knative-sandbox/eventing-kafka.yaml b/prow/jobs_config/knative-sandbox/eventing-kafka.yaml new file mode 100644 index 00000000000..5c8cc53d904 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kafka.yaml @@ -0,0 +1,67 @@ +org: knative-sandbox +repo: eventing-kafka +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + requirements: [docker] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + requirements: [docker] + + - name: integration-test-channel-consolidated + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --consolidated"] + + - name: integration-test-channel-consolidated-tls + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --consolidated-tls"] + + - name: integration-test-channel-consolidated-sasl + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --consolidated-sasl"] + + - name: integration-test-channel-distributed + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, 
--run-test, "./test/e2e-tests.sh --distributed"] + + - name: integration-test-mt-source + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --mt-source"] + modifiers: [presubmit_optional] + + - name: upgrade-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-upgrade-tests.sh"] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: eventing-kafka + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-kafka, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.0.yaml new file mode 100644 index 00000000000..e4b7732cdb0 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-kogito diff --git a/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.1.yaml new file mode 100644 index 00000000000..b34893d1e79 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-kogito diff --git a/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.2.yaml new file mode 100644 index 00000000000..5884394bbf7 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-kogito diff --git a/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.3.yaml new file mode 100644 index 00000000000..8403ecdb18f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kogito-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-kogito + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-kogito diff --git a/prow/jobs_config/knative-sandbox/eventing-kogito.yaml b/prow/jobs_config/knative-sandbox/eventing-kogito.yaml new file mode 100644 index 00000000000..6fe6cddeb9e --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-kogito.yaml @@ -0,0 +1,43 @@ +org: knative-sandbox +repo: eventing-kogito +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: eventing-sources + job_states_to_report: + - failure + report_template: | + "The nightly release job for Kogito failed, check the log: <{{.Status.URL}}|View logs>" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-kogito, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-natss-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.0.yaml new file mode 100644 index 00000000000..813fc34eab8 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.0.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-natss diff --git a/prow/jobs_config/knative-sandbox/eventing-natss-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.1.yaml new file mode 100644 index 00000000000..3fed613ad62 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.1.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-natss diff --git a/prow/jobs_config/knative-sandbox/eventing-natss-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.2.yaml new file mode 100644 index 00000000000..ffe4cb069f1 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.2.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-natss diff --git a/prow/jobs_config/knative-sandbox/eventing-natss-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.3.yaml new file mode 100644 index 00000000000..fb2c6b6eac9 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-natss-release-1.3.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-natss + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-natss diff --git a/prow/jobs_config/knative-sandbox/eventing-natss.yaml b/prow/jobs_config/knative-sandbox/eventing-natss.yaml new file mode 100644 index 00000000000..f37fd19dc25 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-natss.yaml @@ -0,0 +1,29 @@ +org: knative-sandbox +repo: eventing-natss +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: eventing + job_states_to_report: + - failure + report_template: | + "The nightly release job for eventing-natss failed, check the log: <{{.Status.URL}}|View logs>" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-natss, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.0.yaml new file mode 100644 index 00000000000..8a8ad916c36 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.0.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-rabbitmq + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-rabbitmq diff --git a/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.1.yaml new file mode 100644 index 00000000000..146e3065ee0 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.1.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-rabbitmq + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-rabbitmq diff --git a/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.2.yaml new file mode 100644 index 00000000000..1dc13dfbdde --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.2.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-rabbitmq + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-rabbitmq diff --git a/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.3.yaml new file mode 100644 index 00000000000..2dfe2415885 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-rabbitmq-release-1.3.yaml @@ -0,0 +1,38 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-rabbitmq + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: eventing-rabbitmq diff --git a/prow/jobs_config/knative-sandbox/eventing-rabbitmq.yaml b/prow/jobs_config/knative-sandbox/eventing-rabbitmq.yaml new file mode 100644 index 00000000000..e0f2fa70e8f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-rabbitmq.yaml @@ -0,0 +1,29 @@ +org: knative-sandbox +repo: eventing-rabbitmq +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: eventing-rabbitmq + job_states_to_report: + - failure + report_template: | + "The nightly release job for eventing-rabbitmq failed, check the log: <{{.Status.URL}}|View logs>" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-rabbitmq, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/eventing-redis-release-1.0.yaml b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.0.yaml new file mode 100644 index 00000000000..005510d138f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.0.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-redis + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-redis diff --git a/prow/jobs_config/knative-sandbox/eventing-redis-release-1.1.yaml b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.1.yaml new file mode 100644 index 00000000000..acec4f7fe60 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.1.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. 
#### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-redis + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-redis diff --git a/prow/jobs_config/knative-sandbox/eventing-redis-release-1.2.yaml b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.2.yaml new file mode 100644 index 00000000000..c9296407e90 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.2.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-redis + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-redis diff --git a/prow/jobs_config/knative-sandbox/eventing-redis-release-1.3.yaml b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.3.yaml new file mode 100644 index 00000000000..64f8c9aeda8 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-redis-release-1.3.yaml @@ -0,0 +1,41 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + requirements: + - docker + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/eventing-redis + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: eventing-redis diff --git a/prow/jobs_config/knative-sandbox/eventing-redis.yaml b/prow/jobs_config/knative-sandbox/eventing-redis.yaml new file mode 100644 index 00000000000..e92e556ba91 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/eventing-redis.yaml @@ -0,0 +1,23 @@ +org: knative-sandbox +repo: eventing-redis +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + requirements: [docker] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing-redis, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.0.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.0.yaml new file mode 100644 index 00000000000..e0cd3f9ed2a --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-admin diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.1.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.1.yaml new file mode 100644 index 00000000000..838d9280341 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-admin diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.2.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.2.yaml new file mode 100644 index 00000000000..a28b45066ae --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-admin diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.3.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.3.yaml new file mode 100644 index 00000000000..9b052debdc3 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-admin-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-admin + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-admin diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-admin.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-admin.yaml new file mode 100644 index 00000000000..9a45dbd4193 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-admin.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: kn-plugin-admin +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + 
types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-admin, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-diag.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-diag.yaml new file mode 100644 index 00000000000..c1995010f9e --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-diag.yaml @@ -0,0 +1,24 @@ +org: knative-sandbox +repo: kn-plugin-diag +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.0.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.0.yaml new file mode 100644 index 00000000000..e18965b8058 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-event diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.1.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.1.yaml new file mode 100644 index 00000000000..750afc5e9f9 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-event diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.2.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.2.yaml new file mode 100644 index 00000000000..e2f0b98cd62 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-event diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.3.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.3.yaml new file mode 100644 index 00000000000..11499c326d4 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-event-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-event + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-event diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-event.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-event.yaml new file mode 100644 index 00000000000..0fd441a4400 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-event.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: kn-plugin-event +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-event, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-func.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-func.yaml new file mode 100644 index 00000000000..7c98eda8cb3 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-func.yaml @@ -0,0 +1,18 @@ +org: knative-sandbox +repo: kn-plugin-func +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-func, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-migration.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-migration.yaml new file mode 100644 index 00000000000..1e0fd352de4 --- 
/dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-migration.yaml @@ -0,0 +1,24 @@ +org: knative-sandbox +repo: kn-plugin-migration +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-operator.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-operator.yaml new file mode 100644 index 00000000000..73ab3e21c11 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-operator.yaml @@ -0,0 +1,24 @@ +org: knative-sandbox +repo: kn-plugin-operator +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.0.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.0.yaml new file mode 100644 index 00000000000..4c1da65cc7f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-quickstart diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.1.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.1.yaml new file mode 100644 index 00000000000..c98d4569a4e --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-quickstart diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.2.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.2.yaml new file mode 100644 index 00000000000..968b7163274 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-quickstart diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.3.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.3.yaml new file mode 100644 index 00000000000..0e9977a05ea --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-quickstart + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-quickstart diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-quickstart.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart.yaml new file mode 100644 index 00000000000..a1f4c140130 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-quickstart.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: kn-plugin-quickstart +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, 
./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-quickstart, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-sample.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-sample.yaml new file mode 100644 index 00000000000..7bd791c8ad3 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-sample.yaml @@ -0,0 +1,24 @@ +org: knative-sandbox +repo: kn-plugin-sample +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-service-log.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-service-log.yaml new file mode 100644 index 00000000000..77113440dd5 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-service-log.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: kn-plugin-service-log +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-service-log, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.0.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.0.yaml new file mode 100644 index 00000000000..71fa97a881c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.0.yaml @@ -0,0 +1,64 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kafka diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.1.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.1.yaml new file mode 100644 index 00000000000..f9b3ec6b366 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.1.yaml @@ -0,0 +1,64 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kafka diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.2.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.2.yaml new file mode 100644 index 00000000000..e00349828fb --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.2.yaml @@ -0,0 +1,64 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kafka diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.3.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.3.yaml new file mode 100644 index 00000000000..77a345dadb2 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka-release-1.3.yaml @@ -0,0 +1,64 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kafka + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + - docker + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kafka diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka.yaml new file mode 100644 index 00000000000..87d233ff2ef --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kafka.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: kn-plugin-source-kafka +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: 
[presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly, docker] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-source-kafka, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release, docker] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.0.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.0.yaml new file mode 100644 index 00000000000..b3113b2e7be --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kamelet diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.1.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.1.yaml new file mode 100644 index 00000000000..7ba42b0b6cb --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kamelet diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.2.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.2.yaml new file mode 100644 index 00000000000..e633ffb95a3 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kamelet diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.3.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.3.yaml new file mode 100644 index 00000000000..4a15844471c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/kn-plugin-source-kamelet + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: kn-plugin-source-kamelet diff --git a/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet.yaml b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet.yaml new file mode 100644 index 00000000000..52e72f3779b --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kn-plugin-source-kamelet.yaml @@ -0,0 +1,36 @@ +org: knative-sandbox +repo: kn-plugin-source-kamelet +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/kn-plugin-source-kamelet, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/kperf.yaml b/prow/jobs_config/knative-sandbox/kperf.yaml new file mode 100644 index 00000000000..5d50b48512c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/kperf.yaml @@ -0,0 +1,20 @@ +org: knative-sandbox +repo: kperf +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] diff --git a/prow/jobs_config/knative-sandbox/net-certmanager-release-1.0.yaml b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.0.yaml new file mode 100644 index 00000000000..effc35fda26 --- /dev/null +++ 
b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-certmanager diff --git a/prow/jobs_config/knative-sandbox/net-certmanager-release-1.1.yaml b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.1.yaml new file mode 100644 index 00000000000..344945e06ad --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-certmanager diff --git a/prow/jobs_config/knative-sandbox/net-certmanager-release-1.2.yaml b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.2.yaml new file mode 100644 index 00000000000..0a0647c3c7b --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-certmanager diff --git a/prow/jobs_config/knative-sandbox/net-certmanager-release-1.3.yaml b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.3.yaml new file mode 100644 index 00000000000..9b80de15759 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-certmanager-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-certmanager + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-certmanager diff --git a/prow/jobs_config/knative-sandbox/net-certmanager.yaml b/prow/jobs_config/knative-sandbox/net-certmanager.yaml new file mode 100644 index 00000000000..f99086e8494 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-certmanager.yaml @@ -0,0 +1,43 @@ +org: knative-sandbox +repo: net-certmanager +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + 
types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: net-certmanager + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/net-certmanager, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/net-contour-release-1.0.yaml b/prow/jobs_config/knative-sandbox/net-contour-release-1.0.yaml new file mode 100644 index 00000000000..71c3b307ab0 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-contour-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-contour + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-contour diff --git a/prow/jobs_config/knative-sandbox/net-contour-release-1.1.yaml b/prow/jobs_config/knative-sandbox/net-contour-release-1.1.yaml new file mode 100644 index 00000000000..a3842af5120 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-contour-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
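+# NOTE: a reader-added annotation, not generator output. The hand-written
+# main-branch files in this patch (net-certmanager.yaml above, and the
+# other <repo>.yaml files below) share one compact schema; a minimal
+# sketch using only fields that appear in these files:
+#
+#   org: knative-sandbox
+#   repo: net-certmanager
+#   branches: [main]
+#   image: gcr.io/knative-tests/test-infra/prow-tests:stable
+#   imagePullPolicy: Always
+#   jobs:
+#     - name: unit-tests
+#       types: [presubmit]   # presubmits run on PRs; periodics run on a schedule
+#       command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+#       excluded_requirements: [gcp]
+#
+# The requirements/excluded_requirements values seen here (gcp, nightly,
+# release, s390x) are assumed to toggle generator-injected extras such as
+# credentials; that is an inference from usage, not stated in this diff.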
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-contour + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-contour diff --git a/prow/jobs_config/knative-sandbox/net-contour-release-1.2.yaml b/prow/jobs_config/knative-sandbox/net-contour-release-1.2.yaml new file mode 100644 index 00000000000..6aba2091e78 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-contour-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-contour + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-contour diff --git a/prow/jobs_config/knative-sandbox/net-contour-release-1.3.yaml b/prow/jobs_config/knative-sandbox/net-contour-release-1.3.yaml new file mode 100644 index 00000000000..2a7eee73bdb --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-contour-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
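+# NOTE (annotation): the release-N.M variants of each repo are generated
+# and differ from one another only in the `branches:` value and the
+# trailing `--branch release-N.M` argument of the release job. The
+# release-branch job passes `--dot-release`, while the hand-maintained
+# main-branch file pairs an `--auto-release` periodic with a separate
+# `--publish --tag-release` nightly.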
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-contour + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-contour diff --git a/prow/jobs_config/knative-sandbox/net-contour.yaml b/prow/jobs_config/knative-sandbox/net-contour.yaml new file mode 100644 index 00000000000..47adddfcd33 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-contour.yaml @@ -0,0 +1,43 @@ +org: knative-sandbox +repo: net-contour +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: net-contour + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/net-contour, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.1.yaml b/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.1.yaml new file mode 100644 index 00000000000..90455fd962d --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-gateway-api + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-gateway-api diff --git a/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.2.yaml b/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.2.yaml new file mode 100644 index 00000000000..e8b86cbc9eb --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-gateway-api + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-gateway-api diff --git a/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.3.yaml b/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.3.yaml new file mode 100644 index 00000000000..d41f69bf148 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-gateway-api-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-gateway-api + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-gateway-api diff --git a/prow/jobs_config/knative-sandbox/net-gateway-api.yaml b/prow/jobs_config/knative-sandbox/net-gateway-api.yaml new file mode 100644 index 00000000000..08cb4c4213c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-gateway-api.yaml @@ -0,0 +1,43 @@ +org: knative-sandbox +repo: net-gateway-api +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: net-gateway-api + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/net-gateway-api, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/net-http01-release-1.0.yaml b/prow/jobs_config/knative-sandbox/net-http01-release-1.0.yaml new file mode 100644 index 00000000000..4397d1be1e2 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-http01-release-1.0.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
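+# NOTE (annotation): the main-branch configs attach a Slack reporter to
+# their nightly job; a minimal sketch of the fields as used above:
+#
+#   reporter_config:
+#     slack:
+#       channel: net-gateway-api        # channel is named after the repo
+#       report_template: |
+#         "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+#       job_states_to_report:
+#       - "failure"
+#
+# {{.Status.URL}} is a Go-template reference that Prow resolves to the
+# finished job's results URL, so only failures ping the channel.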
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-http01 + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-http01 diff --git a/prow/jobs_config/knative-sandbox/net-http01-release-1.1.yaml b/prow/jobs_config/knative-sandbox/net-http01-release-1.1.yaml new file mode 100644 index 00000000000..f60c4231044 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-http01-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-http01 + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-http01 diff --git a/prow/jobs_config/knative-sandbox/net-http01-release-1.2.yaml b/prow/jobs_config/knative-sandbox/net-http01-release-1.2.yaml new file mode 100644 index 00000000000..8cddc60878f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-http01-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-http01 + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-http01 diff --git a/prow/jobs_config/knative-sandbox/net-http01-release-1.3.yaml b/prow/jobs_config/knative-sandbox/net-http01-release-1.3.yaml new file mode 100644 index 00000000000..f7f1e541ef4 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-http01-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-http01 + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-http01 diff --git a/prow/jobs_config/knative-sandbox/net-http01.yaml b/prow/jobs_config/knative-sandbox/net-http01.yaml new file mode 100644 index 00000000000..583315e6cf4 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-http01.yaml @@ -0,0 +1,43 @@ +org: knative-sandbox +repo: net-http01 +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, 
./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: net-http01 + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/net-http01, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/net-istio-release-1.0.yaml b/prow/jobs_config/knative-sandbox/net-istio-release-1.0.yaml new file mode 100644 index 00000000000..731385943fd --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-istio-release-1.0.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + name: latest + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + name: latest-mesh + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-istio diff --git a/prow/jobs_config/knative-sandbox/net-istio-release-1.1.yaml b/prow/jobs_config/knative-sandbox/net-istio-release-1.1.yaml new file mode 100644 index 00000000000..c74bca046b7 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-istio-release-1.1.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
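+# NOTE (annotation): net-istio extends the common presubmit set with
+# `latest` and `latest-mesh`, which use --run-test to hand
+# presubmit-tests.sh an explicit script instead of a built-in phase:
+#
+#   command: [runner.sh, ./test/presubmit-tests.sh, --run-test,
+#             "./test/e2e-tests.sh --istio-version latest --mesh"]
+#
+# i.e. the same e2e suite against the latest Istio, run once without and
+# once with mesh mode.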
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + name: latest + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + name: latest-mesh + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-istio diff --git a/prow/jobs_config/knative-sandbox/net-istio-release-1.2.yaml b/prow/jobs_config/knative-sandbox/net-istio-release-1.2.yaml new file mode 100644 index 00000000000..61a1cd4c3c1 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-istio-release-1.2.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + name: latest + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + name: latest-mesh + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-istio diff --git a/prow/jobs_config/knative-sandbox/net-istio-release-1.3.yaml b/prow/jobs_config/knative-sandbox/net-istio-release-1.3.yaml new file mode 100644 index 00000000000..31c4c9edf3f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-istio-release-1.3.yaml @@ -0,0 +1,79 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest + name: latest + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --run-test + - ./test/e2e-tests.sh --istio-version latest --mesh + name: latest-mesh + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-istio + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-istio diff --git a/prow/jobs_config/knative-sandbox/net-istio.yaml b/prow/jobs_config/knative-sandbox/net-istio.yaml new file mode 100644 index 00000000000..81ce0df45d2 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-istio.yaml @@ -0,0 +1,51 @@ +org: knative-sandbox +repo: net-istio +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: latest + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --istio-version latest"] + + - name: latest-mesh + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --istio-version latest --mesh"] + + - name: continuous + types: [periodic] + command: [runner.sh, ./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: net-istio + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/net-istio, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/net-kourier-release-1.0.yaml b/prow/jobs_config/knative-sandbox/net-kourier-release-1.0.yaml new file mode 100644 index 00000000000..d18ccb7eed7 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-kourier-release-1.0.yaml @@ -0,0 +1,63 @@ +# 
####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-kourier diff --git a/prow/jobs_config/knative-sandbox/net-kourier-release-1.1.yaml b/prow/jobs_config/knative-sandbox/net-kourier-release-1.1.yaml new file mode 100644 index 00000000000..80074a9670b --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-kourier-release-1.1.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-kourier diff --git a/prow/jobs_config/knative-sandbox/net-kourier-release-1.2.yaml b/prow/jobs_config/knative-sandbox/net-kourier-release-1.2.yaml new file mode 100644 index 00000000000..a93e447bff8 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-kourier-release-1.2.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-kourier diff --git a/prow/jobs_config/knative-sandbox/net-kourier-release-1.3.yaml b/prow/jobs_config/knative-sandbox/net-kourier-release-1.3.yaml new file mode 100644 index 00000000000..d04eefc7700 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-kourier-release-1.3.yaml @@ -0,0 +1,63 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --integration-tests + name: integration-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --all-tests + name: continuous + types: + - periodic +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/net-kourier + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: net-kourier diff --git a/prow/jobs_config/knative-sandbox/net-kourier.yaml b/prow/jobs_config/knative-sandbox/net-kourier.yaml new file mode 100644 index 00000000000..9855df72807 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/net-kourier.yaml @@ -0,0 +1,43 @@ +org: knative-sandbox +repo: net-kourier +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] + + - name: continuous + types: [periodic] + command: [runner.sh, 
./test/presubmit-tests.sh, --all-tests] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + reporter_config: + slack: + channel: net-kourier + report_template: | + "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>" + job_states_to_report: + - "failure" + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/net-kourier, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/reconciler-test.yaml b/prow/jobs_config/knative-sandbox/reconciler-test.yaml new file mode 100644 index 00000000000..a40e15e605c --- /dev/null +++ b/prow/jobs_config/knative-sandbox/reconciler-test.yaml @@ -0,0 +1,20 @@ +org: knative-sandbox +repo: reconciler-test +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: integration-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests] diff --git a/prow/jobs_config/knative-sandbox/sample-controller-release-1.0.yaml b/prow/jobs_config/knative-sandbox/sample-controller-release-1.0.yaml new file mode 100644 index 00000000000..ba10acc5e51 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-controller-release-1.0.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: sample-controller diff --git a/prow/jobs_config/knative-sandbox/sample-controller-release-1.1.yaml b/prow/jobs_config/knative-sandbox/sample-controller-release-1.1.yaml new file mode 100644 index 00000000000..cf99b7dae15 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-controller-release-1.1.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
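+# NOTE (annotation): not every repo gets the full matrix. reconciler-test
+# above defines only the three presubmits (no periodics or release jobs),
+# and sample-controller's files omit integration and continuous jobs
+# entirely, presumably because that repo has no e2e suite to schedule;
+# its release branches carry just build-tests, unit-tests and the
+# dot-release periodic.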
#### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: sample-controller diff --git a/prow/jobs_config/knative-sandbox/sample-controller-release-1.2.yaml b/prow/jobs_config/knative-sandbox/sample-controller-release-1.2.yaml new file mode 100644 index 00000000000..4ff6ea72eae --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-controller-release-1.2.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.2 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.2 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: sample-controller diff --git a/prow/jobs_config/knative-sandbox/sample-controller-release-1.3.yaml b/prow/jobs_config/knative-sandbox/sample-controller-release-1.3.yaml new file mode 100644 index 00000000000..a7cd85e1d2f --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-controller-release-1.3.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.3 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-controller + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.3 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: sample-controller diff --git a/prow/jobs_config/knative-sandbox/sample-controller.yaml b/prow/jobs_config/knative-sandbox/sample-controller.yaml new file mode 100644 index 00000000000..5604cdf0a37 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-controller.yaml @@ -0,0 +1,28 @@ +org: knative-sandbox +repo: sample-controller +branches: [main] +image: gcr.io/knative-tests/test-infra/prow-tests:stable +imagePullPolicy: Always + +jobs: + - name: build-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --build-tests] + excluded_requirements: [gcp] + + - name: unit-tests + types: [presubmit] + command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests] + excluded_requirements: [gcp] + + - name: nightly + types: [periodic] + command: [runner.sh, ./hack/release.sh, --publish, --tag-release] + requirements: [nightly] + excluded_requirements: [gcp] + + - name: release + types: [periodic] + command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/sample-controller, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token] + requirements: [release] + excluded_requirements: [gcp] diff --git a/prow/jobs_config/knative-sandbox/sample-source-release-1.0.yaml b/prow/jobs_config/knative-sandbox/sample-source-release-1.0.yaml new file mode 100644 index 00000000000..7d348ae6f99 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-source-release-1.0.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
#### +# #### #### +# ####################################################################### +branches: +- release-1.0 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.0 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: sample-source diff --git a/prow/jobs_config/knative-sandbox/sample-source-release-1.1.yaml b/prow/jobs_config/knative-sandbox/sample-source-release-1.1.yaml new file mode 100644 index 00000000000..0ff0a5e4e02 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-source-release-1.1.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. #### +# #### #### +# ####################################################################### +branches: +- release-1.1 +image: gcr.io/knative-tests/test-infra/prow-tests:stable +jobs: +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --build-tests + excluded_requirements: + - gcp + name: build-tests + types: + - presubmit +- command: + - runner.sh + - ./test/presubmit-tests.sh + - --unit-tests + excluded_requirements: + - gcp + name: unit-tests + types: + - presubmit +- command: + - runner.sh + - ./hack/release.sh + - --dot-release + - --release-gcs + - knative-releases/sample-source + - --release-gcr + - gcr.io/knative-releases + - --github-token + - /etc/hub-token/token + - --branch + - release-1.1 + name: release + requirements: + - release + excluded_requirements: + - gcp + types: + - periodic +org: knative-sandbox +repo: sample-source diff --git a/prow/jobs_config/knative-sandbox/sample-source-release-1.2.yaml b/prow/jobs_config/knative-sandbox/sample-source-release-1.2.yaml new file mode 100644 index 00000000000..cb410c13db2 --- /dev/null +++ b/prow/jobs_config/knative-sandbox/sample-source-release-1.2.yaml @@ -0,0 +1,49 @@ +# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. #### +# #### PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED. 
####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.2
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/sample-source
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.2
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative-sandbox
+repo: sample-source
diff --git a/prow/jobs_config/knative-sandbox/sample-source-release-1.3.yaml b/prow/jobs_config/knative-sandbox/sample-source-release-1.3.yaml
new file mode 100644
index 00000000000..9e6eadb7a04
--- /dev/null
+++ b/prow/jobs_config/knative-sandbox/sample-source-release-1.3.yaml
@@ -0,0 +1,49 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.3
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/sample-source
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.3
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative-sandbox
+repo: sample-source
diff --git a/prow/jobs_config/knative-sandbox/sample-source.yaml b/prow/jobs_config/knative-sandbox/sample-source.yaml
new file mode 100644
index 00000000000..acad115c81a
--- /dev/null
+++ b/prow/jobs_config/knative-sandbox/sample-source.yaml
@@ -0,0 +1,28 @@
+org: knative-sandbox
+repo: sample-source
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: nightly
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --publish, --tag-release]
+    requirements: [nightly]
+    excluded_requirements: [gcp]
+
+  - name: release
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/sample-source, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token]
+    requirements: [release]
+    excluded_requirements: [gcp]
diff --git a/prow/jobs_config/knative/caching.yaml b/prow/jobs_config/knative/caching.yaml
new file mode 100644
index 00000000000..5cad4b73940
--- /dev/null
+++ b/prow/jobs_config/knative/caching.yaml
@@ -0,0 +1,20 @@
+org: knative
+repo: caching
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: integration-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests]
diff --git a/prow/jobs_config/knative/client-pkg.yaml b/prow/jobs_config/knative/client-pkg.yaml
new file mode 100644
index 00000000000..bed0c1be105
--- /dev/null
+++ b/prow/jobs_config/knative/client-pkg.yaml
@@ -0,0 +1,20 @@
+org: knative
+repo: client-pkg
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: continuous
+    types: [periodic]
+    command: [runner.sh, ./test/presubmit-tests.sh, --all-tests]
diff --git a/prow/jobs_config/knative/client-release-1.0.yaml b/prow/jobs_config/knative/client-release-1.0.yaml
new file mode 100644
index 00000000000..67ff234049e
--- /dev/null
+++ b/prow/jobs_config/knative/client-release-1.0.yaml
@@ -0,0 +1,84 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.0
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-integration-tests-latest-release.sh
+  name: integration-tests-latest-release
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/client
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.0
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: client
diff --git a/prow/jobs_config/knative/client-release-1.1.yaml b/prow/jobs_config/knative/client-release-1.1.yaml
new file mode 100644
index 00000000000..56a796648c9
--- /dev/null
+++ b/prow/jobs_config/knative/client-release-1.1.yaml
@@ -0,0 +1,84 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.1
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-integration-tests-latest-release.sh
+  name: integration-tests-latest-release
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/client
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.1
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: client
diff --git a/prow/jobs_config/knative/client-release-1.2.yaml b/prow/jobs_config/knative/client-release-1.2.yaml
new file mode 100644
index 00000000000..7f2001f174a
--- /dev/null
+++ b/prow/jobs_config/knative/client-release-1.2.yaml
@@ -0,0 +1,84 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.2
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-integration-tests-latest-release.sh
+  name: integration-tests-latest-release
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/client
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.2
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: client
diff --git a/prow/jobs_config/knative/client-release-1.3.yaml b/prow/jobs_config/knative/client-release-1.3.yaml
new file mode 100644
index 00000000000..4c94f41244d
--- /dev/null
+++ b/prow/jobs_config/knative/client-release-1.3.yaml
@@ -0,0 +1,84 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.3
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-integration-tests-latest-release.sh
+  name: integration-tests-latest-release
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/client
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.3
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: client
diff --git a/prow/jobs_config/knative/client.yaml b/prow/jobs_config/knative/client.yaml
new file mode 100644
index 00000000000..ae31162a9be
--- /dev/null
+++ b/prow/jobs_config/knative/client.yaml
@@ -0,0 +1,64 @@
+org: knative
+repo: client
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: integration-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests]
+
+  - name: integration-tests-latest-release
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-integration-tests-latest-release.sh]
+
+  - name: continuous
+    types: [periodic]
+    command: [runner.sh, ./test/presubmit-tests.sh, --all-tests]
+
+  - name: tekton
+    types: [periodic]
+    command: [runner.sh, ./test/tekton-tests.sh]
+
+  - name: s390x-e2e-tests
+    types: [periodic]
+    requirements: [s390x]
+    command: [runner.sh]
+    args:
+      - bash
+      - -c
+      - |
+        "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh client-main && kubectl get cm s390x-config-client -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+    env:
+      - name: INGRESS_CLASS
+        value: contour.ingress.networking.knative.dev
+
+  - name: nightly
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --publish, --tag-release]
+    requirements: [nightly]
+    excluded_requirements: [gcp]
+    reporter_config:
+      slack:
+        channel: client
+        report_template: |
+          "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+        job_states_to_report:
+          - "failure"
+
+  - name: release
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/client, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token]
+    requirements: [release]
+    excluded_requirements: [gcp]
diff --git a/prow/jobs_config/knative/docs.yaml b/prow/jobs_config/knative/docs.yaml
new file mode 100644
index 00000000000..b7729624718
--- /dev/null
+++ b/prow/jobs_config/knative/docs.yaml
@@ -0,0 +1,21 @@
+org: knative
+repo: docs
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: continuous
+    types: [periodic]
+    command: [runner.sh, ./test/presubmit-tests.sh, --all-tests]
+    requirements: [docker]
diff --git a/prow/jobs_config/knative/eventing-release-1.0.yaml b/prow/jobs_config/knative/eventing-release-1.0.yaml
new file mode 100644
index 00000000000..92b66e2c129
--- /dev/null
+++ b/prow/jobs_config/knative/eventing-release-1.0.yaml
@@ -0,0 +1,113 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.0
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  resources: default
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-conformance-tests.sh
+  name: conformance-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-rekt-tests.sh
+  modifiers:
+  - presubmit_optional
+  name: reconciler-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: SYSTEM_NAMESPACE
+    value: knative-eventing
+  - name: SCALE_CHAOSDUCK_TO_ZERO
+    value: "1"
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/eventing
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.0
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: eventing
+resources: high
+resources_presets:
+  default:
+    limits:
+      memory: 10Gi
+    requests:
+      memory: 8Gi
+  high:
+    limits:
+      memory: 16Gi
+    requests:
+      memory: 12Gi
diff --git a/prow/jobs_config/knative/eventing-release-1.1.yaml b/prow/jobs_config/knative/eventing-release-1.1.yaml
new file mode 100644
index 00000000000..29a8141437c
--- /dev/null
+++ b/prow/jobs_config/knative/eventing-release-1.1.yaml
@@ -0,0 +1,113 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.1
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  resources: default
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-conformance-tests.sh
+  name: conformance-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-rekt-tests.sh
+  modifiers:
+  - presubmit_optional
+  name: reconciler-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: SYSTEM_NAMESPACE
+    value: knative-eventing
+  - name: SCALE_CHAOSDUCK_TO_ZERO
+    value: "1"
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/eventing
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.1
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: eventing
+resources: high
+resources_presets:
+  default:
+    limits:
+      memory: 10Gi
+    requests:
+      memory: 8Gi
+  high:
+    limits:
+      memory: 16Gi
+    requests:
+      memory: 12Gi
diff --git a/prow/jobs_config/knative/eventing-release-1.2.yaml b/prow/jobs_config/knative/eventing-release-1.2.yaml
new file mode 100644
index 00000000000..5cf6a51849f
--- /dev/null
+++ b/prow/jobs_config/knative/eventing-release-1.2.yaml
@@ -0,0 +1,113 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.2
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  resources: default
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-conformance-tests.sh
+  name: conformance-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-rekt-tests.sh
+  modifiers:
+  - presubmit_optional
+  name: reconciler-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: SYSTEM_NAMESPACE
+    value: knative-eventing
+  - name: SCALE_CHAOSDUCK_TO_ZERO
+    value: "1"
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/eventing
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.2
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: eventing
+resources: high
+resources_presets:
+  default:
+    limits:
+      memory: 10Gi
+    requests:
+      memory: 8Gi
+  high:
+    limits:
+      memory: 16Gi
+    requests:
+      memory: 12Gi
diff --git a/prow/jobs_config/knative/eventing-release-1.3.yaml b/prow/jobs_config/knative/eventing-release-1.3.yaml
new file mode 100644
index 00000000000..f199a399b99
--- /dev/null
+++ b/prow/jobs_config/knative/eventing-release-1.3.yaml
@@ -0,0 +1,113 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.3
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  resources: default
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-conformance-tests.sh
+  name: conformance-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-rekt-tests.sh
+  modifiers:
+  - presubmit_optional
+  name: reconciler-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: SYSTEM_NAMESPACE
+    value: knative-eventing
+  - name: SCALE_CHAOSDUCK_TO_ZERO
+    value: "1"
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/eventing
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.3
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: eventing
+resources: high
+resources_presets:
+  default:
+    limits:
+      memory: 10Gi
+    requests:
+      memory: 8Gi
+  high:
+    limits:
+      memory: 16Gi
+    requests:
+      memory: 12Gi
diff --git a/prow/jobs_config/knative/eventing.yaml b/prow/jobs_config/knative/eventing.yaml
new file mode 100644
index 00000000000..0ef4332b9cb
--- /dev/null
+++ b/prow/jobs_config/knative/eventing.yaml
@@ -0,0 +1,83 @@
+org: knative
+repo: eventing
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    resources: default
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: conformance-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-conformance-tests.sh]
+
+  - name: reconciler-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-rekt-tests.sh]
+    modifiers: [presubmit_optional]
+
+  - name: upgrade-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-upgrade-tests.sh]
+
+  - name: continuous
+    types: [periodic]
+    command: [runner.sh, ./test/presubmit-tests.sh, --all-tests]
+
+  - name: s390x-e2e-tests
+    types: [periodic]
+    cron: 0 5 * * *
+    requirements: [s390x]
+    command: [runner.sh]
+    args:
+      - bash
+      - -c
+      - |
+        "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && ./connect.sh eventing-main && kubectl get cm s390x-config-eventing -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+    env:
+      - name: SYSTEM_NAMESPACE
+        value: knative-eventing
+      - name: SCALE_CHAOSDUCK_TO_ZERO
+        value: "1"
+
+  - name: nightly
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --publish, --tag-release]
+    requirements: [nightly]
+    excluded_requirements: [gcp]
+    reporter_config:
+      slack:
+        channel: eventing
+        report_template: |
+          "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+        job_states_to_report:
+          - "failure"
+
+  - name: release
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/eventing, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token]
+    requirements: [release]
+    excluded_requirements: [gcp]
+
+resources: high
+resources_presets:
+  high:
+    limits:
+      memory: 16Gi
+    requests:
+      memory: 12Gi
+
+  default:
+    limits:
+      memory: 10Gi
+    requests:
+      memory: 8Gi
diff --git a/prow/jobs_config/knative/hack.yaml b/prow/jobs_config/knative/hack.yaml
new file mode 100644
index 00000000000..277ca187995
--- /dev/null
+++ b/prow/jobs_config/knative/hack.yaml
@@ -0,0 +1,24 @@
+org: knative
+repo: hack
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: integration-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-tests.sh]
+
+  - name: kind-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-kind.sh]
diff --git a/prow/jobs_config/knative/networking.yaml b/prow/jobs_config/knative/networking.yaml
new file mode 100644
index 00000000000..be8b1175002
--- /dev/null
+++ b/prow/jobs_config/knative/networking.yaml
@@ -0,0 +1,20 @@
+org: knative
+repo: networking
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: integration-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests]
diff --git a/prow/jobs_config/knative/operator-release-1.0.yaml b/prow/jobs_config/knative/operator-release-1.0.yaml
new file mode 100644
index 00000000000..204d5397863
--- /dev/null
+++ b/prow/jobs_config/knative/operator-release-1.0.yaml
@@ -0,0 +1,103 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.0
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-serving-upgrade-tests.sh
+  name: serving-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-eventing-upgrade-tests.sh
+  name: eventing-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/operator
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.0
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: operator
diff --git a/prow/jobs_config/knative/operator-release-1.1.yaml b/prow/jobs_config/knative/operator-release-1.1.yaml
new file mode 100644
index 00000000000..835565cd36c
--- /dev/null
+++ b/prow/jobs_config/knative/operator-release-1.1.yaml
@@ -0,0 +1,103 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.1
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-serving-upgrade-tests.sh
+  name: serving-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-eventing-upgrade-tests.sh
+  name: eventing-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/operator
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.1
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: operator
diff --git a/prow/jobs_config/knative/operator-release-1.2.yaml b/prow/jobs_config/knative/operator-release-1.2.yaml
new file mode 100644
index 00000000000..e8ad652076f
--- /dev/null
+++ b/prow/jobs_config/knative/operator-release-1.2.yaml
@@ -0,0 +1,103 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.2
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-serving-upgrade-tests.sh
+  name: serving-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-eventing-upgrade-tests.sh
+  name: eventing-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/operator
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.2
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: operator
diff --git a/prow/jobs_config/knative/operator-release-1.3.yaml b/prow/jobs_config/knative/operator-release-1.3.yaml
new file mode 100644
index 00000000000..1ea124dbc60
--- /dev/null
+++ b/prow/jobs_config/knative/operator-release-1.3.yaml
@@ -0,0 +1,103 @@
+# #######################################################################
+# ####                                                               ####
+# ####            THIS FILE IS AUTOMATICALLY GENERATED.              ####
+# ####        PLEASE ONLY MODIFY IT MANUALLY WHEN NEEDED.            ####
+# ####                                                               ####
+# #######################################################################
+branches:
+- release-1.3
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+jobs:
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --build-tests
+  excluded_requirements:
+  - gcp
+  name: build-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --unit-tests
+  excluded_requirements:
+  - gcp
+  name: unit-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --integration-tests
+  name: integration-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-upgrade-tests.sh
+  name: upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-serving-upgrade-tests.sh
+  name: serving-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --run-test
+  - ./test/e2e-eventing-upgrade-tests.sh
+  name: eventing-upgrade-tests
+  types:
+  - presubmit
+- command:
+  - runner.sh
+  - ./test/presubmit-tests.sh
+  - --all-tests
+  name: continuous
+  types:
+  - periodic
+- args:
+  - bash
+  - -c
+  - |
+    "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+  command:
+  - runner.sh
+  cron: 0 5 * * *
+  env:
+  - name: INGRESS_CLASS
+    value: contour.ingress.networking.knative.dev
+  name: s390x-e2e-tests
+  requirements:
+  - s390x
+  types:
+  - periodic
+- command:
+  - runner.sh
+  - ./hack/release.sh
+  - --dot-release
+  - --release-gcs
+  - knative-releases/operator
+  - --release-gcr
+  - gcr.io/knative-releases
+  - --github-token
+  - /etc/hub-token/token
+  - --branch
+  - release-1.3
+  name: release
+  requirements:
+  - release
+  excluded_requirements:
+  - gcp
+  types:
+  - periodic
+org: knative
+repo: operator
diff --git a/prow/jobs_config/knative/operator.yaml b/prow/jobs_config/knative/operator.yaml
new file mode 100644
index 00000000000..1ca27f36085
--- /dev/null
+++ b/prow/jobs_config/knative/operator.yaml
@@ -0,0 +1,69 @@
+org: knative
+repo: operator
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: integration-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests]
+
+  - name: upgrade-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-upgrade-tests.sh]
+
+  - name: serving-upgrade-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-serving-upgrade-tests.sh]
+
+  - name: eventing-upgrade-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-eventing-upgrade-tests.sh]
+
+  - name: continuous
+    types: [periodic]
+    command: [runner.sh, ./test/presubmit-tests.sh, --all-tests]
+
+  - name: s390x-e2e-tests
+    types: [periodic]
+    cron: 0 5 * * *
+    requirements: [s390x]
+    command: [runner.sh]
+    args:
+      - bash
+      - -c
+      - |
+        "mkdir -p /root/.kube && server_addr=$(cat /opt/cluster/config) && ssh -o StrictHostKeyChecking=no -i /opt/cluster/knative01.pem linux1@${server_addr} cat /home/linux1/.kube/config > /root/.kube/config && kubectl get cm s390x-config-operator -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && ./test/e2e-tests.sh --run-tests"
+    env:
+      - name: INGRESS_CLASS
+        value: contour.ingress.networking.knative.dev
+
+  - name: nightly
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --publish, --tag-release]
+    requirements: [nightly]
+    excluded_requirements: [gcp]
+    reporter_config:
+      slack:
+        channel: operator
+        report_template: |
+          "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+        job_states_to_report:
+          - "failure"
+
+  - name: release
+    types: [periodic]
+    command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/operator, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token]
+    requirements: [release]
+    excluded_requirements: [gcp]
diff --git a/prow/jobs_config/knative/pkg.yaml b/prow/jobs_config/knative/pkg.yaml
new file mode 100644
index 00000000000..203654c9217
--- /dev/null
+++ b/prow/jobs_config/knative/pkg.yaml
@@ -0,0 +1,20 @@
+org: knative
+repo: pkg
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: integration-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --integration-tests]
diff --git a/prow/jobs_config/knative/serving.yaml b/prow/jobs_config/knative/serving.yaml
new file mode 100644
index 00000000000..6c1707f4d83
--- /dev/null
+++ b/prow/jobs_config/knative/serving.yaml
@@ -0,0 +1,248 @@
+org: knative
+repo: serving
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    resources: default
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
+
+  - name: upgrade-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/e2e-upgrade-tests.sh]
+
+  - name: performance-tests-kperf
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, ./test/performance/performance-tests.sh]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: istio-latest-mesh
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --istio-version latest --mesh"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: istio-latest-mesh-short
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --istio-version latest --mesh --short"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: istio-latest-mesh-tls
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-auto-tls-tests.sh --istio-version latest --mesh"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: istio-latest-no-mesh
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --istio-version latest --no-mesh"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: istio-latest-no-mesh-tls
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: kourier-stable
+    types: [presubmit]
+    regex: ^third_party/kourier-latest/*
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --kourier-version stable"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: kourier-stable-tls
+    types: [presubmit]
+    regex: ^third_party/kourier-latest/*
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-auto-tls-tests.sh --kourier-version stable"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: contour-latest
+    types: [presubmit]
+    regex: ^third_party/contour-latest/*
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --contour-version latest"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: contour-tls
+    types: [presubmit]
+    regex: ^third_party/contour-latest/*
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-auto-tls-tests.sh --contour-version latest"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: gateway-api-latest
+    types: [presubmit]
+    regex: ^third_party/gateway-api-latest/*
+    command: [runner.sh, ./test/presubmit-tests.sh, --run-test, "./test/e2e-tests.sh --gateway-api-version latest"]
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: https
+    types: [presubmit]
+    regex: ^third_party/cert-manager-latest/*
+    command:
+      - runner.sh
+    args:
+      - ./test/presubmit-tests.sh
+      - --run-test
+      - "./test/e2e-tests.sh --https"
+      - --run-test
+      - "./test/e2e-auto-tls-tests.sh --https"
+    modifiers: [presubmit_optional, presubmit_skipped]
+
+  - name: continuous
+    timeout: 3h
+    types: [periodic]
+    command: [runner.sh, ./test/presubmit-tests.sh, --all-tests]
+
+  - name: istio-latest-mesh
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - ./test/presubmit-tests.sh
+      - --run-test
+      - ./test/e2e-tests.sh --istio-version latest --mesh
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --istio-version latest --mesh
+
+  - name: istio-latest-no-mesh
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - ./test/presubmit-tests.sh
+      - --run-test
+      - ./test/e2e-tests.sh --istio-version latest --no-mesh
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --istio-version latest --no-mesh
+
+  - name: istio-head-mesh
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - ./test/presubmit-tests.sh
+      - --run-test
+      - ./test/e2e-tests.sh --istio-version head --mesh
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --istio-version head --mesh
+
+  - name: istio-head-no-mesh
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - ./test/presubmit-tests.sh
+      - --run-test
+      - ./test/e2e-tests.sh --istio-version head --no-mesh
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --istio-version head --no-mesh
+
+  - name: kourier-stable
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - --run-test
+      - ./test/e2e-tests.sh --kourier-version stable
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --kourier-version stable --run-http01-auto-tls-tests
+
+  - name: contour-latest
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - --run-test
+      - ./test/e2e-tests.sh --contour-version latest
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --contour-version latest --run-http01-auto-tls-tests
+
+  - name: gateway-api-latest
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - --run-test
+      - ./test/e2e-tests.sh --gateway-api-version latest
+
+  - name: https
+    types: [periodic]
+    command:
+      - runner.sh
+    args:
+      - --run-test
+      - ./test/e2e-tests.sh --https
+      - --run-test
+      - ./test/e2e-auto-tls-tests.sh --https
+
+  - name: s390x-kourier-tests
+    types: [periodic]
+    cron: 0 5 * * *
+    requirements: [s390x]
+    command: [runner.sh]
+    args:
+      - bash
+      - -c
+      - |
+        "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh kourier-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --kourier-version latest"
+    env:
+      - name: SYSTEM_NAMESPACE
+        value: knative-serving
+      - name: TEST_OPTIONS
+        value: "--enable-alpha --enable-beta --resolvabledomain=false"
+
+  - name: s390x-contour-tests
+    types: [periodic]
+    cron: 0 5 * * *
+    requirements: [s390x]
+    command: [runner.sh]
+    args:
+      - bash
+      - -c
+      - |
+        "mkdir -p /root/.kube && cat /opt/cluster/ci-script > connect.sh && chmod +x connect.sh && server_addr=$(./connect.sh contour-main) && kubectl get cm s390x-config-serving -n default -o jsonpath='{.data.adjustment-script}' > adjust.sh && chmod +x adjust.sh && ./adjust.sh && export TEST_OPTIONS=$TEST_OPTIONS' --ingressendpoint '${server_addr} && ./test/e2e-tests.sh --run-tests --contour-version latest"
    env:
+      - name: SYSTEM_NAMESPACE
+        value: knative-serving
+      - name: TEST_OPTIONS
+        value: "--enable-alpha --enable-beta --resolvabledomain=false"
+
+  - name: nightly
+    types: [periodic]
+    timeout: 3h
+    command: [runner.sh, ./hack/release.sh, --publish, --tag-release]
+    requirements: [nightly]
+    excluded_requirements: [gcp]
+    reporter_config:
+      slack:
+        channel: serving
+        report_template: |
+          "The nightly release job fails, check the log: <{{.Status.URL}}|View logs>"
+        job_states_to_report:
+          - "failure"
+
+  - name: release
+    types: [periodic]
+    timeout: 3h
+    command: [runner.sh, ./hack/release.sh, --auto-release, --release-gcs, knative-releases/serving, --release-gcr, gcr.io/knative-releases, --github-token, /etc/hub-token/token]
+    requirements: [release]
+    excluded_requirements: [gcp]
+
+resources: high
+resources_presets:
+  high:
+    limits:
+      memory: 16Gi
+    requests:
+      memory: 12Gi
+
+  default:
+    limits:
+      memory: 10Gi
+    requests:
+      memory: 8Gi
diff --git a/prow/jobs_config/knative/test-infra.yaml b/prow/jobs_config/knative/test-infra.yaml
new file mode 100644
index 00000000000..a5d16dc1921
--- /dev/null
+++ b/prow/jobs_config/knative/test-infra.yaml
@@ -0,0 +1,16 @@
+org: knative
+repo: test-infra
+branches: [main]
+image: gcr.io/knative-tests/test-infra/prow-tests:stable
+imagePullPolicy: Always
+
+jobs:
+  - name: build-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --build-tests]
+    excluded_requirements: [gcp]
+
+  - name: unit-tests
+    types: [presubmit]
+    command: [runner.sh, ./test/presubmit-tests.sh, --unit-tests]
+    excluded_requirements: [gcp]
diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE
new file mode 100644
index 00000000000..b9d6a27ea92
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/util/LICENSE b/third_party/VENDOR-LICENSE/github.com/GoogleCloudPlatform/testgrid/LICENSE similarity index 100% rename from third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/util/LICENSE rename to third_party/VENDOR-LICENSE/github.com/GoogleCloudPlatform/testgrid/LICENSE diff --git a/third_party/VENDOR-LICENSE/github.com/andygrunwald/go-gerrit/LICENSE b/third_party/VENDOR-LICENSE/github.com/andygrunwald/go-gerrit/LICENSE new file mode 100644 index 00000000000..692f6bea285 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/andygrunwald/go-gerrit/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andy Grunwald + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/third_party/VENDOR-LICENSE/github.com/andygrunwald/go-jira/LICENSE b/third_party/VENDOR-LICENSE/github.com/andygrunwald/go-jira/LICENSE new file mode 100644 index 00000000000..692f6bea285 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/andygrunwald/go-jira/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andy Grunwald + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/LICENSE.txt b/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/NOTICE.txt b/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/third_party/VENDOR-LICENSE/github.com/beorn7/perks/quantile/LICENSE b/third_party/VENDOR-LICENSE/github.com/beorn7/perks/quantile/LICENSE new file mode 100644 index 00000000000..339177be663 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/beorn7/perks/quantile/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/cespare/xxhash/v2/LICENSE.txt b/third_party/VENDOR-LICENSE/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 00000000000..24b53065f40 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/danwakefield/fnmatch/LICENSE b/third_party/VENDOR-LICENSE/github.com/danwakefield/fnmatch/LICENSE new file mode 100644 index 00000000000..0dc9851a343 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/danwakefield/fnmatch/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2016, Daniel Wakefield +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/github.com/denormal/go-gitignore/LICENSE b/third_party/VENDOR-LICENSE/github.com/denormal/go-gitignore/LICENSE new file mode 100644 index 00000000000..7c7d093d93f --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/denormal/go-gitignore/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Denormal Limited + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/dgrijalva/jwt-go/v4/LICENSE b/third_party/VENDOR-LICENSE/github.com/dgrijalva/jwt-go/v4/LICENSE new file mode 100644 index 00000000000..df83a9c2f01 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/dgrijalva/jwt-go/v4/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/third_party/VENDOR-LICENSE/github.com/docker/cli/cli/config/NOTICE b/third_party/VENDOR-LICENSE/github.com/docker/cli/cli/config/NOTICE index 58b19b6d15b..0c74e15b057 100644 --- a/third_party/VENDOR-LICENSE/github.com/docker/cli/cli/config/NOTICE +++ b/third_party/VENDOR-LICENSE/github.com/docker/cli/cli/config/NOTICE @@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). -This product contains software (https://github.com/creack/pty) developed +This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: diff --git a/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/LICENSE b/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/LICENSE index 6d8d58fb676..8f3fee627a4 100644 --- a/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/LICENSE +++ b/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2018 Docker, Inc. + Copyright 2013-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/NOTICE b/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/NOTICE index 58b19b6d15b..8a37c1c7bc4 100644 --- a/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/NOTICE +++ b/third_party/VENDOR-LICENSE/github.com/docker/docker/pkg/homedir/NOTICE @@ -1,9 +1,9 @@ Docker -Copyright 2012-2017 Docker, Inc. +Copyright 2012-2016 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). -This product contains software (https://github.com/creack/pty) developed +This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: diff --git a/third_party/VENDOR-LICENSE/github.com/evanphx/json-patch/LICENSE b/third_party/VENDOR-LICENSE/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 00000000000..df76d7d7716 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/github.com/fatih/structs/LICENSE b/third_party/VENDOR-LICENSE/github.com/fatih/structs/LICENSE new file mode 100644 index 00000000000..34504e4b3ef --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/fatih/structs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/third_party/VENDOR-LICENSE/github.com/felixge/fgprof/LICENSE.txt b/third_party/VENDOR-LICENSE/github.com/felixge/fgprof/LICENSE.txt new file mode 100644 index 00000000000..3e424911bdb --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/felixge/fgprof/LICENSE.txt @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright © 2020 Felix Geisendörfer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE b/third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE new file mode 100644 index 00000000000..df83a9c2f01 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/form3tech-oss/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/third_party/VENDOR-LICENSE/github.com/fsnotify/fsnotify/LICENSE b/third_party/VENDOR-LICENSE/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 00000000000..e180c8fb059 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/third_party/VENDOR-LICENSE/github.com/fvbommel/sortorder/LICENSE b/third_party/VENDOR-LICENSE/github.com/fvbommel/sortorder/LICENSE new file mode 100644 index 00000000000..5c695fb590f --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/fvbommel/sortorder/LICENSE @@ -0,0 +1,17 @@ +The MIT License (MIT) +Copyright (c) 2015 Frits van Bommel +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE b/third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000000..7805d36de73 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/github.com/go-logr/logr/LICENSE b/third_party/VENDOR-LICENSE/github.com/go-logr/logr/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/gogo/protobuf/LICENSE b/third_party/VENDOR-LICENSE/github.com/gogo/protobuf/LICENSE new file mode 100644 index 00000000000..f57de90da8a --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/third_party/VENDOR-LICENSE/github.com/golang-jwt/jwt/LICENSE b/third_party/VENDOR-LICENSE/github.com/golang-jwt/jwt/LICENSE new file mode 100644 index 00000000000..35dbc252041 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/golang-jwt/jwt/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/third_party/VENDOR-LICENSE/github.com/gomodule/redigo/redis/LICENSE b/third_party/VENDOR-LICENSE/github.com/gomodule/redigo/redis/LICENSE new file mode 100644 index 00000000000..f433b1a53f5 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/gomodule/redigo/redis/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/third_party/VENDOR-LICENSE/github.com/google/btree/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/btree/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/github.com/google/gofuzz/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/gofuzz/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/google/pprof/profile/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/pprof/profile/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/google/pprof/profile/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/google/wire/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/wire/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/google/wire/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/googleapis/gax-go/LICENSE b/third_party/VENDOR-LICENSE/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 00000000000..6d16b6578a2 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/github.com/googleapis/gnostic/LICENSE b/third_party/VENDOR-LICENSE/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 00000000000..6b0b1270ff0 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/third_party/VENDOR-LICENSE/github.com/gregjones/httpcache/LICENSE.txt b/third_party/VENDOR-LICENSE/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 00000000000..81316beb0cb --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/LICENSE b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/README.md b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 00000000000..036e5313fc8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. 
With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 00000000000..8d306bf5134 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/doc.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 00000000000..05841092a7b --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. 
This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/go.mod b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/go.mod
new file mode 100644
index 00000000000..310f07569fc
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-cleanhttp
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/handlers.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 00000000000..3c845dc0dc6
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,48 @@
+package cleanhttp
+
+import (
+	"net/http"
+	"strings"
+	"unicode"
+)
+
+// HandlerInput provides input options to cleanhttp's handlers
+type HandlerInput struct {
+	ErrStatus int
+}
+
+// PrintablePathCheckHandler is a middleware that ensures the request path
+// contains only printable runes.
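+//
+// Minimal wiring (an editor's sketch, not part of the upstream source; the
+// mux is hypothetical; a nil *HandlerInput falls back to the defaults):
+//
+//	mux := http.NewServeMux()
+//	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)
+//	_ = http.ListenAndServe(":8080", handler)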
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/.travis.yml b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/.travis.yml deleted file mode 100644 index 304a8359558..00000000000 --- a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - -branches: - only: - - master - -script: make test testrace diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/README.md b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/README.md index ead5830f7b7..71dd308ed81 100644 --- a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/README.md +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/README.md @@ -1,10 +1,11 @@ # go-multierror -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) -[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. @@ -14,16 +15,35 @@ be a list of errors. If the caller knows this, they can unwrap the list and access the errors. If the caller doesn't know, the error formats to a nice human-readable format. -`go-multierror` implements the -[errwrap](https://github.com/hashicorp/errwrap) interface so that it can -be used with that library, as well. +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. ## Installation and Docs Install using `go get github.com/hashicorp/go-multierror`. Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. 
+ +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. + +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` ## Usage @@ -81,6 +101,39 @@ if err := something(); err != nil { } ``` +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + **Returning a multierror only if there are errors** If you build a `multierror.Error`, you can use the `ErrorOrNil` function diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/append.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/append.go index 775b6e753e7..3e2589bfde0 100644 --- a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/append.go +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/append.go @@ -6,6 +6,8 @@ package multierror // If err is not a multierror.Error, then it will be turned into // one. If any of the errs are multierr.Error, they will be flattened // one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. 
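+//
+// A typical accumulation pattern (an editor's sketch, not upstream text;
+// step1 and step2 are hypothetical functions returning error):
+//
+//	var result *multierror.Error
+//	result = multierror.Append(result, step1())
+//	result = multierror.Append(result, step2())
+//	return result.ErrorOrNil()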
func Append(err error, errs ...error) *Error { switch err := err.(type) { case *Error: diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.mod b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.mod index 2534331d5f9..141cc4ccb25 100644 --- a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.mod +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.mod @@ -1,3 +1,5 @@ module github.com/hashicorp/go-multierror +go 1.13 + require github.com/hashicorp/errwrap v1.0.0 diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.sum b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.sum index 85b1f8ff333..e8238e9ec91 100644 --- a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.sum +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/go.sum @@ -1,4 +1,2 @@ -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/group.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 00000000000..9c29efb7f87 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/multierror.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/multierror.go index 89b1422d1d1..f5457432646 100644 --- a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/multierror.go +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-multierror/multierror.go @@ -1,6 +1,7 @@ package multierror import ( + "errors" "fmt" ) @@ -39,13 +40,82 @@ func (e *Error) GoString() string { return fmt.Sprintf("*%#v", *e) } -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. // -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. 
+// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } return e.Errors } + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. 
+func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/.gitignore b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 00000000000..4e309e0b326 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/.travis.yml b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/.travis.yml new file mode 100644 index 00000000000..c4fb6d6c8bb --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.12.4 + +branches: + only: + - master + +script: make updatedeps test diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/LICENSE b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/Makefile b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 00000000000..da17640e644 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... 
+
+.PHONY: default test updatedeps
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/README.md b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/README.md
new file mode 100644
index 00000000000..30357c75668
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/README.md
@@ -0,0 +1,61 @@
+go-retryablehttp
+================
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
+[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
+
+The `retryablehttp` package provides a familiar HTTP client interface with
+automatic retries and exponential backoff. It is a thin wrapper over the
+standard `net/http` client library and exposes nearly the same public API. This
+makes `retryablehttp` very easy to drop into existing programs.
+
+`retryablehttp` performs automatic retries under certain conditions. Mainly, if
+an error is returned by the client (connection errors, etc.), or if a 500-range
+response code is received (except 501), then a retry is invoked after a wait
+period. Otherwise, the response is returned and left to the caller to
+interpret.
+
+The main difference from `net/http` is that requests which take a request body
+(POST/PUT et al.) can have the body provided in a number of ways (some more or
+less efficient) that allow "rewinding" the request body if the initial request
+fails so that the full request can be attempted again. See the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more
+details.
+
+Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required.
+
+Example Use
+===========
+
+Using this library should look almost identical to what you would do with
+`net/http`. The simplest example of a GET request is shown below:
+
+```go
+resp, err := retryablehttp.Get("/foo")
+if err != nil {
+    panic(err)
+}
+```
+
+The returned response object is an `*http.Response`, the same thing you would
+usually get from `net/http`. Had the request failed one or more times, the
+above call would have blocked and retried with exponential backoff.
+
+## Getting a stdlib `*http.Client` with retries
+
+It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
+This makes use of retryablehttp broadly applicable with minimal effort. Simply
+configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`:
+
+```go
+retryClient := retryablehttp.NewClient()
+retryClient.RetryMax = 10
+
+standardClient := retryClient.StandardClient() // *http.Client
+```
+
+For more usage and examples see the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/client.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/client.go
new file mode 100644
index 00000000000..f1ccd3df35c
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/client.go
@@ -0,0 +1,705 @@
+// Package retryablehttp provides a familiar HTTP client interface with
+// automatic retries and exponential backoff. It is a thin wrapper over the
+// standard net/http client library and exposes nearly the same public API.
+// This makes retryablehttp very easy to drop into existing programs.
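+//
+// A minimal drop-in sketch (an editor's illustration echoing the README,
+// not code from this package; the URL is a placeholder):
+//
+//	client := retryablehttp.NewClient()
+//	client.RetryMax = 3
+//	resp, err := client.Get("https://example.com/")
+//	if err != nil {
+//		return err // all retries exhausted
+//	}
+//	defer resp.Body.Close()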
+// +// retryablehttp performs automatic retries under certain conditions. Mainly, if +// an error is returned by the client (connection errors etc), or if a 500-range +// response is received, then a retry is invoked. Otherwise, the response is +// returned and left to the caller to interpret. +// +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultLogger is the logger provided with defaultClient + defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. 
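+//
+// For example (an editor's sketch; assumes the caller imports context and
+// time), to bound a request with a deadline:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	req = req.WithContext(ctx)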
+func (r *Request) WithContext(ctx context.Context) *Request { + r.Request = r.Request.WithContext(ctx) + return r +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. + case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. 
+	case *bytes.Reader:
+		buf, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, 0, err
+		}
+		bodyReader = func() (io.Reader, error) {
+			return bytes.NewReader(buf), nil
+		}
+		contentLength = int64(len(buf))
+
+	// Compat case
+	case io.ReadSeeker:
+		raw := body
+		bodyReader = func() (io.Reader, error) {
+			_, err := raw.Seek(0, 0)
+			return ioutil.NopCloser(raw), err
+		}
+		if lr, ok := raw.(LenReader); ok {
+			contentLength = int64(lr.Len())
+		}
+
+	// Read all in so we can reset
+	case io.Reader:
+		buf, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, 0, err
+		}
+		bodyReader = func() (io.Reader, error) {
+			return bytes.NewReader(buf), nil
+		}
+		contentLength = int64(len(buf))
+
+	// No body provided, nothing to do
+	case nil:
+
+	// Unrecognized type
+	default:
+		return nil, 0, fmt.Errorf("cannot handle type %T", rawBody)
+	}
+	return bodyReader, contentLength, nil
+}
+
+// FromRequest wraps an http.Request in a retryablehttp.Request
+func FromRequest(r *http.Request) (*Request, error) {
+	bodyReader, _, err := getBodyReaderAndContentLength(r.Body)
+	if err != nil {
+		return nil, err
+	}
+	// Could assert contentLength == r.ContentLength
+	return &Request{bodyReader, r}, nil
+}
+
+// NewRequest creates a new wrapped request.
+func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
+	bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
+	if err != nil {
+		return nil, err
+	}
+
+	httpReq, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	httpReq.ContentLength = contentLength
+
+	return &Request{bodyReader, httpReq}, nil
+}
+
+// Logger is an interface that allows the use of loggers other than the
+// standard log.Logger.
+type Logger interface {
+	Printf(string, ...interface{})
+}
+
+// LeveledLogger is an interface that defines the basic methods a logger
+// library needs.
+type LeveledLogger interface {
+	Error(string, ...interface{})
+	Info(string, ...interface{})
+	Debug(string, ...interface{})
+	Warn(string, ...interface{})
+}
+
+// hookLogger adapts a LeveledLogger to Logger for use by the existing hook
+// functions without changing the API.
+type hookLogger struct {
+	LeveledLogger
+}
+
+func (h hookLogger) Printf(s string, args ...interface{}) {
+	h.Info(fmt.Sprintf(s, args...))
+}
+
+// RequestLogHook allows a function to run before each retry. The HTTP
+// request which will be made, and the retry number (0 for the initial
+// request) are available to users. The internal logger is exposed to
+// consumers.
+type RequestLogHook func(Logger, *http.Request, int)
+
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckRetry callback to properly close any
+// response body before returning.
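+//
+// A custom policy can defer to the default one (an editor's sketch; the
+// client variable is a *Client configured elsewhere):
+//
+//	client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) {
+//		if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
+//			return true, nil // additionally retry rate-limited responses
+//		}
+//		return retryablehttp.DefaultRetryPolicy(ctx, resp, err)
+//	}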
+type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
+
+// Backoff specifies a policy for how long to wait between retries.
+// It is called after a failing request to determine the amount of time
+// that should pass before trying again.
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
+
+// ErrorHandler is called if retries are exhausted, containing the last status
+// from the http library. If not specified, default behavior for the library is
+// to close the body and return an error indicating how many tries were
+// attempted. If overriding this, be sure to close the body if needed.
+type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
+
+// Client is used to make HTTP requests. It adds additional functionality
+// like automatic retries to tolerate minor outages.
+type Client struct {
+	HTTPClient *http.Client // Internal HTTP client.
+	Logger     interface{}  // Custom logger instance. Can be either Logger or LeveledLogger
+
+	RetryWaitMin time.Duration // Minimum time to wait
+	RetryWaitMax time.Duration // Maximum time to wait
+	RetryMax     int           // Maximum number of retries
+
+	// RequestLogHook allows a user-supplied function to be called
+	// before each retry.
+	RequestLogHook RequestLogHook
+
+	// ResponseLogHook allows a user-supplied function to be called
+	// with the response from each HTTP request executed.
+	ResponseLogHook ResponseLogHook
+
+	// CheckRetry specifies the policy for handling retries, and is called
+	// after each request. The default policy is DefaultRetryPolicy.
+	CheckRetry CheckRetry
+
+	// Backoff specifies the policy for how long to wait between retries
+	Backoff Backoff
+
+	// ErrorHandler specifies the custom error handler to use, if any
+	ErrorHandler ErrorHandler
+
+	loggerInit sync.Once
+}
+
+// NewClient creates a new Client with default settings.
+func NewClient() *Client {
+	return &Client{
+		HTTPClient:   cleanhttp.DefaultPooledClient(),
+		Logger:       defaultLogger,
+		RetryWaitMin: defaultRetryWaitMin,
+		RetryWaitMax: defaultRetryWaitMax,
+		RetryMax:     defaultRetryMax,
+		CheckRetry:   DefaultRetryPolicy,
+		Backoff:      DefaultBackoff,
+	}
+}
+
+func (c *Client) logger() interface{} {
+	c.loggerInit.Do(func() {
+		if c.Logger == nil {
+			return
+		}
+
+		switch c.Logger.(type) {
+		case Logger, LeveledLogger:
+			// ok
+		default:
+			// This should only happen in development, while Logger is being set up, not in prod.
+			panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger))
+		}
+	})
+
+	return c.Logger
+}
+
+// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
+// will retry on connection errors and server errors.
+func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	// do not retry on context.Canceled or context.DeadlineExceeded
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+
+	if err != nil {
+		if v, ok := err.(*url.Error); ok {
+			// Don't retry if the error was due to too many redirects.
+			if redirectsErrorRe.MatchString(v.Error()) {
+				return false, nil
+			}
+
+			// Don't retry if the error was due to an invalid protocol scheme.
+			if schemeErrorRe.MatchString(v.Error()) {
+				return false, nil
+			}
+
+			// Don't retry if the error was due to TLS cert verification failure.
+			if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
+				return false, nil
+			}
+		}
+
+		// The error is likely recoverable so retry.
+ return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + return true, nil + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) +func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + } + } + + var resp *http.Response + var err error + + for i := 0; ; i++ { + var code int // HTTP response code + + // Always rewind the request body when non-nil. 
+ if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case Logger: + c.RequestLogHook(v, req.Request, i) + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, err = c.HTTPClient.Do(req.Request) + if resp != nil { + code = resp.StatusCode + } + + // Check if we should continue with retries. + checkOK, checkErr := c.CheckRetry(req.Context(), resp, err) + + if err != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case Logger: + c.ResponseLogHook(v, resp) + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + // Now decide if we should continue. + if !checkOK { + if checkErr != nil { + err = checkErr + } + c.HTTPClient.CloseIdleConnections() + return resp, err + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. + if err == nil && resp != nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if code > 0 { + desc = fmt.Sprintf("%s (status: %d)", desc, code) + } + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + } + } + select { + case <-req.Context().Done(): + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-time.After(wait): + } + } + + if c.ErrorHandler != nil { + c.HTTPClient.CloseIdleConnections() + return c.ErrorHandler(resp, err, c.RetryMax+1) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + resp.Body.Close() + } + c.HTTPClient.CloseIdleConnections() + return nil, fmt.Errorf("%s %s giving up after %d attempts", + req.Method, req.URL, c.RetryMax+1) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + case LeveledLogger: + v.Error("error reading response body", "error", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. 
+func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/go.mod b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/go.mod new file mode 100644 index 00000000000..7cc02b76fa4 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/go.mod @@ -0,0 +1,8 @@ +module github.com/hashicorp/go-retryablehttp + +require ( + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-hclog v0.9.2 +) + +go 1.13 diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/go.sum b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/go.sum new file mode 100644 index 00000000000..71afe568227 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git 
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/roundtripper.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/roundtripper.go
new file mode 100644
index 00000000000..b841b4cfe53
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/go-retryablehttp/roundtripper.go
@@ -0,0 +1,43 @@
+package retryablehttp
+
+import (
+	"net/http"
+	"sync"
+)
+
+// RoundTripper implements the http.RoundTripper interface, using a retrying
+// HTTP client to execute requests.
+//
+// It is important to note that retryablehttp doesn't always act exactly as a
+// RoundTripper should. This is highly dependent on the retryable client's
+// configuration.
+type RoundTripper struct {
+	// The client to use during requests. If nil, the default retryablehttp
+	// client and settings will be used.
+	Client *Client
+
+	// once ensures that the logic to initialize the default client runs at
+	// most once, in a single thread.
+	once sync.Once
+}
+
+// init initializes the underlying retryable client.
+func (rt *RoundTripper) init() {
+	if rt.Client == nil {
+		rt.Client = NewClient()
+	}
+}
+
+// RoundTrip satisfies the http.RoundTripper interface.
+func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	rt.once.Do(rt.init)
+
+	// Convert the request to be retryable.
+	retryableReq, err := FromRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Execute the request.
+	return rt.Client.Do(retryableReq)
+}
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/LICENSE b/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/LICENSE
new file mode 100644
index 00000000000..be2cc4dfb60
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b.
any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/simplelru/lru.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/simplelru/lru.go
new file mode 100644
index 00000000000..a86c8539e06
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/simplelru/lru.go
@@ -0,0 +1,177 @@
+package simplelru
+
+import (
+	"container/list"
+	"errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+	size      int
+	evictList *list.List
+	items     map[interface{}]*list.Element
+	onEvict   EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+	key   interface{}
+	value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+	if size <= 0 {
+		return nil, errors.New("Must provide a positive size")
+	}
+	c := &LRU{
+		size:      size,
+		evictList: list.New(),
+		items:     make(map[interface{}]*list.Element),
+		onEvict:   onEvict,
+	}
+	return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+	for k, v := range c.items {
+		if c.onEvict != nil {
+			c.onEvict(k, v.Value.(*entry).value)
+		}
+		delete(c.items, k)
+	}
+	c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+	// Check for existing item
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		ent.Value.(*entry).value = value
+		return false
+	}
+
+	// Add new item
+	ent := &entry{key, value}
+	entry := c.evictList.PushFront(ent)
+	c.items[key] = entry
+
+	evict := c.evictList.Len() > c.size
+	// Verify size not exceeded
+	if evict {
+		c.removeOldest()
+	}
+	return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		if ent.Value.(*entry) == nil {
+			return nil, false
+		}
+		return ent.Value.(*entry).value, true
+	}
+	return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+	_, ok = c.items[key]
+	return ok
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+	var ent *list.Element
+	if ent, ok = c.items[key]; ok {
+		return ent.Value.(*entry).value, true
+	}
+	return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+	if ent, ok := c.items[key]; ok {
+		c.removeElement(ent)
+		return true
+	}
+	return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+	keys := make([]interface{}, len(c.items))
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+		keys[i] = ent.Value.(*entry).key
+		i++
+	}
+	return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+	return c.evictList.Len()
+}
+
+// Resize changes the cache size.
+func (c *LRU) Resize(size int) (evicted int) {
+	diff := c.Len() - size
+	if diff < 0 {
+		diff = 0
+	}
+	for i := 0; i < diff; i++ {
+		c.removeOldest()
+	}
+	c.size = size
+	return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+	c.evictList.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.items, kv.key)
+	if c.onEvict != nil {
+		c.onEvict(kv.key, kv.value)
+	}
+}
diff --git a/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/simplelru/lru_interface.go b/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/simplelru/lru_interface.go
new file mode 100644
index 00000000000..92d70934d63
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/hashicorp/golang-lru/simplelru/simplelru/lru_interface.go
@@ -0,0 +1,39 @@
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key, value interface{}) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key interface{}) (value interface{}, ok bool)
+
+	// Checks if a key exists in cache without updating the recent-ness.
+	Contains(key interface{}) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key interface{}) (value interface{}, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key interface{}) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (interface{}, interface{}, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []interface{}
+
+	// Returns the number of items in the cache.
+	Len() int
+
+	// Clears all cache entries.
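// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored file: a minimal usage sketch for
// this simplelru package. The concrete *LRU in lru.go above satisfies the
// LRUCache interface that continues below; names are taken from those files.
//
//	cache, err := simplelru.NewLRU(128, nil) // nil: no eviction callback
//	if err != nil {
//		log.Fatal(err)
//	}
//	evicted := cache.Add("greeting", "hello") // false while under capacity
//	if v, ok := cache.Get("greeting"); ok {   // Get marks the key most recently used
//		fmt.Println(v, evicted)           // "hello false"
//	}
// ---------------------------------------------------------------------------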
+	Purge()
+
+	// Resizes cache, returning number evicted
+	Resize(int) int
+}
diff --git a/third_party/VENDOR-LICENSE/github.com/jmespath/go-jmespath/LICENSE b/third_party/VENDOR-LICENSE/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 00000000000..b03310a91fd
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/VENDOR-LICENSE/github.com/json-iterator/go/LICENSE b/third_party/VENDOR-LICENSE/github.com/json-iterator/go/LICENSE
new file mode 100644
index 00000000000..2cf4f5ab28e
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/VENDOR-LICENSE/github.com/mattn/go-zglob/LICENSE b/third_party/VENDOR-LICENSE/github.com/mattn/go-zglob/LICENSE
new file mode 100644
index 00000000000..740fa931322
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/mattn/go-zglob/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/matttproud/golang_protobuf_extensions/pbutil/LICENSE b/third_party/VENDOR-LICENSE/github.com/matttproud/golang_protobuf_extensions/pbutil/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/matttproud/golang_protobuf_extensions/pbutil/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/matttproud/golang_protobuf_extensions/pbutil/NOTICE b/third_party/VENDOR-LICENSE/github.com/matttproud/golang_protobuf_extensions/pbutil/NOTICE new file mode 100644 index 00000000000..5d8cb5b72e7 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/matttproud/golang_protobuf_extensions/pbutil/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/third_party/VENDOR-LICENSE/github.com/modern-go/concurrent/LICENSE b/third_party/VENDOR-LICENSE/github.com/modern-go/concurrent/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/modern-go/concurrent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/modern-go/reflect2/LICENSE b/third_party/VENDOR-LICENSE/github.com/modern-go/reflect2/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/modern-go/reflect2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/github.com/opencontainers/runc/libcontainer/user/LICENSE b/third_party/VENDOR-LICENSE/github.com/opencontainers/runc/libcontainer/user/LICENSE new file mode 100644 index 00000000000..27448585ad4 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/opencontainers/runc/libcontainer/user/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/opencontainers/runc/libcontainer/user/NOTICE b/third_party/VENDOR-LICENSE/github.com/opencontainers/runc/libcontainer/user/NOTICE new file mode 100644 index 00000000000..5c97abce4b9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/opencontainers/runc/libcontainer/user/NOTICE @@ -0,0 +1,17 @@ +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/third_party/VENDOR-LICENSE/github.com/peterbourgon/diskv/LICENSE b/third_party/VENDOR-LICENSE/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 00000000000..41ce7f16e1d --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/client_golang/prometheus/LICENSE b/third_party/VENDOR-LICENSE/github.com/prometheus/client_golang/prometheus/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/client_golang/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/client_golang/prometheus/NOTICE b/third_party/VENDOR-LICENSE/github.com/prometheus/client_golang/prometheus/NOTICE new file mode 100644 index 00000000000..dd878a30ee9 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/client_golang/prometheus/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/client_model/go/LICENSE b/third_party/VENDOR-LICENSE/github.com/prometheus/client_model/go/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/client_model/go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/client_model/go/NOTICE b/third_party/VENDOR-LICENSE/github.com/prometheus/client_model/go/NOTICE new file mode 100644 index 00000000000..20110e410e5 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/client_model/go/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/common/LICENSE b/third_party/VENDOR-LICENSE/github.com/prometheus/common/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/common/NOTICE b/third_party/VENDOR-LICENSE/github.com/prometheus/common/NOTICE new file mode 100644 index 00000000000..636a2c1a5e8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/third_party/VENDOR-LICENSE/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 00000000000..7723656d58d --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. 
+ +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/procfs/LICENSE b/third_party/VENDOR-LICENSE/github.com/prometheus/procfs/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/prometheus/procfs/NOTICE b/third_party/VENDOR-LICENSE/github.com/prometheus/procfs/NOTICE new file mode 100644 index 00000000000..53c5e9aa111 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/third_party/VENDOR-LICENSE/github.com/shurcooL/githubv4/LICENSE b/third_party/VENDOR-LICENSE/github.com/shurcooL/githubv4/LICENSE new file mode 100644 index 00000000000..ca4c77642da --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/shurcooL/githubv4/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Dmitri Shuralyov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/third_party/VENDOR-LICENSE/github.com/shurcooL/graphql/LICENSE b/third_party/VENDOR-LICENSE/github.com/shurcooL/graphql/LICENSE new file mode 100644 index 00000000000..ca4c77642da --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/shurcooL/graphql/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Dmitri Shuralyov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/github.com/tektoncd/pipeline/pkg/LICENSE b/third_party/VENDOR-LICENSE/github.com/tektoncd/pipeline/pkg/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/tektoncd/pipeline/pkg/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/github.com/trivago/tgo/LICENSE b/third_party/VENDOR-LICENSE/github.com/trivago/tgo/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/third_party/VENDOR-LICENSE/github.com/trivago/tgo/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/third_party/VENDOR-LICENSE/go4.org/bytereplacer/LICENSE b/third_party/VENDOR-LICENSE/go4.org/bytereplacer/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/third_party/VENDOR-LICENSE/go4.org/bytereplacer/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/third_party/VENDOR-LICENSE/gocloud.dev/LICENSE b/third_party/VENDOR-LICENSE/gocloud.dev/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/gocloud.dev/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/golang.org/x/term/LICENSE b/third_party/VENDOR-LICENSE/golang.org/x/term/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/third_party/VENDOR-LICENSE/golang.org/x/term/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/golang.org/x/time/rate/LICENSE b/third_party/VENDOR-LICENSE/golang.org/x/time/rate/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/third_party/VENDOR-LICENSE/golang.org/x/time/rate/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/gomodules.xyz/jsonpatch/v2/LICENSE b/third_party/VENDOR-LICENSE/gomodules.xyz/jsonpatch/v2/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/third_party/VENDOR-LICENSE/gomodules.xyz/jsonpatch/v2/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/third_party/VENDOR-LICENSE/gopkg.in/fsnotify.v1/LICENSE b/third_party/VENDOR-LICENSE/gopkg.in/fsnotify.v1/LICENSE new file mode 100644 index 00000000000..f21e5408009 --- /dev/null +++ b/third_party/VENDOR-LICENSE/gopkg.in/fsnotify.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/gopkg.in/inf.v0/LICENSE b/third_party/VENDOR-LICENSE/gopkg.in/inf.v0/LICENSE new file mode 100644 index 00000000000..87a5cede339 --- /dev/null +++ b/third_party/VENDOR-LICENSE/gopkg.in/inf.v0/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/gopkg.in/ini.v1/LICENSE b/third_party/VENDOR-LICENSE/gopkg.in/ini.v1/LICENSE new file mode 100644 index 00000000000..d361bbcdf5c --- /dev/null +++ b/third_party/VENDOR-LICENSE/gopkg.in/ini.v1/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/gopkg.in/robfig/cron.v2/LICENSE b/third_party/VENDOR-LICENSE/gopkg.in/robfig/cron.v2/LICENSE new file mode 100644 index 00000000000..3a0f627ffeb --- /dev/null +++ b/third_party/VENDOR-LICENSE/gopkg.in/robfig/cron.v2/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/VENDOR-LICENSE/gopkg.in/yaml.v3/LICENSE b/third_party/VENDOR-LICENSE/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 00000000000..2683e4bb1f2 --- /dev/null +++ b/third_party/VENDOR-LICENSE/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/VENDOR-LICENSE/gopkg.in/yaml.v3/NOTICE b/third_party/VENDOR-LICENSE/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 00000000000..866d74a7ad7 --- /dev/null +++ b/third_party/VENDOR-LICENSE/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/VENDOR-LICENSE/istio.io/test-infra/tools/prowgen/pkg/LICENSE b/third_party/VENDOR-LICENSE/istio.io/test-infra/tools/prowgen/pkg/LICENSE new file mode 100644 index 00000000000..56e48aa37f6 --- /dev/null +++ b/third_party/VENDOR-LICENSE/istio.io/test-infra/tools/prowgen/pkg/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2020 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/k8s.io/api/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/api/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/third_party/forked/golang/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/k8s.io/client-go/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/client-go/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/k8s.io/client-go/third_party/forked/golang/template/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/client-go/third_party/forked/golang/template/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/client-go/third_party/forked/golang/template/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. 
All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/VENDOR-LICENSE/k8s.io/component-base/config/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/component-base/config/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/component-base/config/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/k8s.io/klog/v2/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/klog/v2/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/klog/v2/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/k8s.io/kube-openapi/pkg/util/proto/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/kube-openapi/pkg/util/proto/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/kube-openapi/pkg/util/proto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/k8s.io/test-infra/prow/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/test-infra/LICENSE similarity index 100% rename from third_party/VENDOR-LICENSE/k8s.io/test-infra/prow/LICENSE rename to third_party/VENDOR-LICENSE/k8s.io/test-infra/LICENSE diff --git a/third_party/VENDOR-LICENSE/k8s.io/utils/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/utils/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/third_party/VENDOR-LICENSE/k8s.io/utils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/knative.dev/pkg/LICENSE b/third_party/VENDOR-LICENSE/knative.dev/pkg/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/third_party/VENDOR-LICENSE/knative.dev/pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third_party/VENDOR-LICENSE/sigs.k8s.io/controller-runtime/pkg/LICENSE b/third_party/VENDOR-LICENSE/sigs.k8s.io/controller-runtime/pkg/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/third_party/VENDOR-LICENSE/sigs.k8s.io/controller-runtime/pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/VENDOR-LICENSE/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/third_party/VENDOR-LICENSE/sigs.k8s.io/structured-merge-diff/v4/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/third_party/VENDOR-LICENSE/sigs.k8s.io/structured-merge-diff/v4/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tools/config-generator/README.md b/tools/config-generator/README.md deleted file mode 100644 index 6e21106fb4b..00000000000 --- a/tools/config-generator/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# config-generator - -config-generator is a tool that takes a meta config file (e.g. -../../prow/config_knative.yaml) and [templates](./templates) as -input, and generates configuration files for Prow and testgrid. - -## Notice - -As Knative evolves and more and more Prow jobs are required, this tool has -become clumsy and hard to maintain. There have been some initial discussions to -replace it with a more generic solution, but no clear outcome yet. If you have -any ideas, please join the discussion in the Knative Productivity Slack -channel.
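For context on the pattern the removed generator implemented: per its README above, it read a meta config file and rendered Prow/TestGrid YAML through Go templates, and (as `executeTemplate` in the deleted `main.go` later in this diff shows) it used `text/template` with `[[ ]]` delimiters so that any literal `{{ }}` in the generated YAML survives rendering. Below is a minimal, hypothetical sketch of that pattern only; the `metaJob` type, the inline meta snippet, and the job template are invented for illustration and are not the tool's real files or schema.

```go
// Illustrative sketch of meta-config-driven generation, not the real tool.
package main

import (
	"log"
	"os"
	"text/template"

	"gopkg.in/yaml.v2"
)

// metaJob is a made-up, simplified stand-in for one job entry in a meta config.
type metaJob struct {
	Repo    string `yaml:"repo"`
	Command string `yaml:"command"`
	Timeout int    `yaml:"timeout"`
}

// A tiny invented job template; "[[ ]]" delimiters leave YAML's own "{{ }}" intact.
const jobTemplate = `- name: pull-[[.Repo]]-unit-tests
  decorate: true
  spec:
    containers:
    - command: ["[[.Command]]"]
  timeout: [[.Timeout]]m
`

func main() {
	// Inline stand-in for the meta config; the real tool read prow/config_knative.yaml.
	meta := []byte("repo: serving\ncommand: ./test/presubmit-tests.sh\ntimeout: 50\n")

	var job metaJob
	if err := yaml.Unmarshal(meta, &job); err != nil {
		log.Fatalf("Failed unmarshalling meta config: %v", err)
	}

	t := template.Must(template.New("job").Delims("[[", "]]").Parse(jobTemplate))
	if err := t.Execute(os.Stdout, job); err != nil {
		log.Fatalf("Error in template: %v", err)
	}
}
```

Running the sketch prints a single presubmit job entry; the real generator iterated over every repo and job section of the meta config and emitted the combined Prow and TestGrid config files.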
diff --git a/tools/config-generator/customjobs.go b/tools/config-generator/customjobs.go deleted file mode 100644 index e94b0afea27..00000000000 --- a/tools/config-generator/customjobs.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Although custom jobs are not generated by this generator, certain testgrid -// configs are needed for certain custom jobs - -package main - -var ( - customJobnames = []string{ - "ci-knative-cleanup", - "ci-knative-flakes-reporter", - "ci-knative-flakes-resultsrecorder", - "ci-knative-prow-jobs-syncer", - "post-knative-test-infra-image-push", - "post-knative-sandbox-peribolos", - "post-knative-test-infra-deploy-tools", - } -) - -func addCustomJobsTestgrid() { - var ( - extras = map[string]string{ - "num_failures_to_alert": "1", - "alert_options": "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"", - } - ) - for _, job := range customJobnames { - metaData.AddNonAlignedTest(NonAlignedTestGroup{ - DashboardGroup: "maintenance", - DashboardName: "utilities", - HumanTabName: job, - CIJobName: job, - Extra: extras, - }) - } -} diff --git a/tools/config-generator/customjobs_test.go b/tools/config-generator/customjobs_test.go deleted file mode 100644 index 5c424eadf83..00000000000 --- a/tools/config-generator/customjobs_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "gopkg.in/yaml.v2" - "k8s.io/apimachinery/pkg/util/sets" -) - -var ( - defaultTemplateConfigPath = "../../prow/jobs/custom" -) - -type customJobStruct struct { - Presubmits map[string][]singleCustomJob `yaml:"presubmits,omitempty"` - Postsubmits map[string][]singleCustomJob `yaml:"postsubmits,omitempty"` - Periodics []singleCustomJob `yaml:"periodics,omitempty"` -} - -type singleCustomJob struct { - Name string `yaml:"name"` -} - -func TestEnsureCustomJob(t *testing.T) { - SetupForTesting() - validJobs := sets.NewString() - filepath.Walk(defaultTemplateConfigPath, func(path string, info os.FileInfo, err error) error { - if strings.HasSuffix(path, ".yaml") { - content, err := ioutil.ReadFile(path) - if err != nil { - t.Fatalf("Failed reading template file %q: %v", path, err) - } - - allCustomJobs := customJobStruct{} - if err = yaml.Unmarshal(content, &allCustomJobs); err != nil { - t.Fatalf("Failed unmarshalling %q: %v", path, err) - } - for _, sjs := range allCustomJobs.Presubmits { - for _, sj := range sjs { - validJobs.Insert(sj.Name) - } - } - for _, sjs := range allCustomJobs.Postsubmits { - for _, sj := range sjs { - validJobs.Insert(sj.Name) - } - } - for _, sj := range allCustomJobs.Periodics { - validJobs.Insert(sj.Name) - } - } - return nil - }) - - for _, job := range customJobnames { - if !validJobs.Has(job) { - t.Fatalf("Job %q doesn't exist in %q", job, defaultTemplateConfigPath) - } - } -} - -func TestAddCustomJobsTestgrid(t *testing.T) { - SetupForTesting() - addCustomJobsTestgrid() - if len(metaData.nonAligned) != len(customJobnames) { - t.Errorf("Mismatch in number of nonaligned jobs: expected %d, Actual %d", - len(customJobnames), - len(metaData.nonAligned)) - } -} diff --git a/tools/config-generator/getlivebranch.go b/tools/config-generator/getlivebranch.go deleted file mode 100644 index 2807ff76653..00000000000 --- a/tools/config-generator/getlivebranch.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "fmt" - "regexp" - "strings" - - "github.com/google/go-github/v32/github" - "knative.dev/test-infra/pkg/ghutil" -) - -func latestReleaseBranch(gc ghutil.GithubOperations, repo string) (string, error) { - parts := strings.Split(repo, "/") - if len(parts) != 2 { - return "", fmt.Errorf("repo name %q should be in the form of [ORG]/[REPO]", repo) - } - branches, err := gc.ListBranches(parts[0], parts[1]) - if err != nil { - return "", fmt.Errorf("failed listing branches for repo %q: %w", repo, err) - } - return filterLatest(branches), nil - } - -// filterLatest returns the latest release branch in the form of -// [MAJOR].[MINOR]; if no valid release branch exists in the form of -// `release-[MAJOR].[MINOR]`, it returns "" -func filterLatest(branches []*github.Branch) string { - var ( - reReleaseBranch = regexp.MustCompile(`^release\-(\d+\.\d+)$`) - latest = "" - ) - - for _, branch := range branches { - if matches := reReleaseBranch.FindStringSubmatch(*branch.Name); len(matches) > 1 { - release := matches[1] - if latest == "" || versionComp(release, latest) > 0 { - latest = release - } - } - } - - return latest -} diff --git a/tools/config-generator/getlivebranch_test.go b/tools/config-generator/getlivebranch_test.go deleted file mode 100644 index c1d2e5c8833..00000000000 --- a/tools/config-generator/getlivebranch_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package main - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-github/v32/github" - "knative.dev/test-infra/pkg/ghutil/fakeghutil" -) - -func TestLatestReleaseBranch(t *testing.T) { - SetupForTesting() - fgc := fakeghutil.NewFakeGithubClient() - - names := []string{ - "release-0.1", - "release-1.0", - "release-3.4", - "release-0.2", - } - - branches := []*github.Branch{} - for i := range names { - branches = append(branches, &github.Branch{Name: &names[i]}) - } - - fgc.Branches = map[string][]*github.Branch{ - "my-repo": branches, - } - - _, err := latestReleaseBranch(fgc, "no slash") - if err == nil { - t.Fatalf("Format was not ORG/REPO, expected error.") - } - latest, _ := latestReleaseBranch(fgc, "my-org/my-repo") - if diff := cmp.Diff(latest, "3.4"); diff != "" { - t.Fatalf("Did not find latest version (-got +want)\n%s", diff) - } -} - -func TestFilterLatest(t *testing.T) { - SetupForTesting() - names := []string{ - "release-0.1", - "release-1.0", - "release-3.4", - "release-0.2", - } - - branches := []*github.Branch{} - for i := range names { - branches = append(branches, &github.Branch{Name: &names[i]}) - } - - res := filterLatest(branches) - if diff := cmp.Diff(res, "3.4"); diff != "" { - t.Fatalf("Did not find latest version (-got +want)\n%s", diff) - } -} diff --git a/tools/config-generator/k8s_testgrid_config.go b/tools/config-generator/k8s_testgrid_config.go deleted file mode 100644 index 82397ddb6f5..00000000000 --- a/tools/config-generator/k8s_testgrid_config.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// data definitions that are used for the config file generation of k8s testgrid - -package main - -import ( - "regexp" - "sort" - - "k8s.io/apimachinery/pkg/util/sets" -) - -const ( - k8sTestgridTempl = "k8s_testgrid.yaml" - k8sTestgridGroupTempl = "k8s_testgrid_testgroup.yaml" -) - -type k8sTestgridData struct { - AllRepos []string - OrgsAndRepos map[string][]string -} - -func generateK8sTestgrid(metaData TestGridMetaData) { - // Regex for release dashboards such as `knative-0.21` or `knative-sandbox-1.00` - reReleaseBranch := regexp.MustCompile(`(knative|knative\-sandbox|google)\-[\d]+\.[\d]+`) - - allReposSet := sets.NewString("name: utilities") - // Sort orgsAndRepos to maintain the output order - allOrgs := []string{"maintenance", "prow-tests"} - for org := range metaData.md { - allOrgs = append(allOrgs, org) - } - sort.Strings(allOrgs) - orgsAndRepos := map[string][]string{ - "maintenance": {"utilities"}, - } - for org, repos := range metaData.md { - // If the org name matches a release branch, it is ungrouped - if reReleaseBranch.MatchString(org) { - allReposSet.Insert("name: " + org) - continue - } - renamedReposForOrg := []string{} - for repo := range repos { - allReposSet.Insert("name: " + repo) - if repo == "utilities" { - continue - } - renamedReposForOrg = append(renamedReposForOrg, repo) - } - orgsAndRepos[org] = renamedReposForOrg - } - allRepos := allReposSet.List() // Returns in sorted order. - - executeTemplate("k8s testgrid", - readTemplate(k8sTestgridTempl), - struct{ AllRepos []string }{allRepos}) - - for _, org := range allOrgs { - repos := orgsAndRepos[org] - sort.Strings(repos) - groupName := org - // If the group name matches a release branch, skip it - if reReleaseBranch.MatchString(groupName) { - continue - } - executeTemplate("k8s testgrid group", - readTemplate(k8sTestgridGroupTempl), - struct { - Org string - Repos []string - }{groupName, repos}) - } -} diff --git a/tools/config-generator/main.go b/tools/config-generator/main.go deleted file mode 100644 index 6036c469eb5..00000000000 --- a/tools/config-generator/main.go +++ /dev/null @@ -1,861 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// The make_config tool generates a full Prow config for the Knative project, -// taking a yaml file with key definitions as input. - -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path" - "regexp" - "runtime" - "sort" - "strings" - "text/template" - "time" - - "gopkg.in/yaml.v2" - "k8s.io/apimachinery/pkg/util/sets" - - "knative.dev/test-infra/pkg/ghutil" -) - -const ( - // Manifests generated by ko are indented by 2 spaces. - baseIndent = " " - templateDir = "templates" - - // ########################################################## - // ############## prow configuration templates ############## - // ########################################################## - // commonHeaderConfig contains common header definitions.
- commonHeaderConfig = "common_header.yaml" -) - -var ( - // GitHub orgs that are using knative.dev path alias. - pathAliasOrgs = sets.NewString("knative", "knative-sandbox") - // GitHub repos that are not using knative.dev path alias. - nonPathAliasRepos = sets.NewString("knative/docs") -) - -type logFatalfFunc func(string, ...interface{}) - -// repositoryData contains basic data about each Knative repository. -type repositoryData struct { - Name string - EnablePerformanceTests bool - EnableGoCoverage bool - GoCoverageThreshold int - Processed bool -} - -// prowConfigTemplateData contains basic data about Prow. -type prowConfigTemplateData struct { - Year int - GcsBucket string - PresubmitLogsDir string - LogsDir string - ProwHost string - TestGridHost string - GubernatorHost string - TestGridGcsBucket string - TideRepos []string - ManagedRepos []string - ManagedOrgs []string - JobConfigPath string - CoreConfigPath string - PluginConfigPath string - TestInfraRepo string -} - -// baseProwJobTemplateData contains basic data about a Prow job. -type baseProwJobTemplateData struct { - OrgName string - RepoName string - RepoNameForJob string - GcsBucket string - GcsLogDir string - GcsPresubmitLogDir string - RepoURI string - RepoBranch string - CloneURI string - SecurityContext []string - SkipBranches []string - Branches []string - DecorationConfig []string - ExtraRefs []string - Command string - Args []string - Env []string - Volumes []string - VolumeMounts []string - Resources []string - ReporterConfig []string - JobStatesToReport []string - Timeout int - AlwaysRun bool - Optional bool - TestAccount string - ServiceAccount string - ReleaseGcs string - GoCoverageThreshold int - Image string - Labels []string - PathAlias string - Cluster string - NeedsMonitor bool - Annotations []string -} - -// #################################################################################################### -// ################ data definitions that are used for the prow config file generation ################ -// #################################################################################################### - -// outputter is a struct that directs program output and counts the number of write calls. -type outputter struct { - io.Writer - count int -} - -func newOutputter(writer io.Writer) outputter { - return outputter{writer, 0} -} - -// outputConfig outputs the given line, if not empty, to the output writer (e.g. stdout). -func (o *outputter) outputConfig(line string) { - if strings.TrimSpace(line) != "" { - fmt.Fprintln(o, strings.TrimRight(line, " ")) - o.count++ - } -} - -// sectionGenerator is a function that generates Prow job configs given a slice of a yaml file with configs. -type sectionGenerator func(string, string, yaml.MapSlice) - -// stringArrayFlag is the content of a multi-value flag. -type stringArrayFlag []string - -var ( - // Values used in the jobs that can be changed through command-line flags. - // TODO: these should be CapsCase - // ... 
until they are not global - output outputter - logFatalf logFatalfFunc - prowHost string - testGridHost string - gubernatorHost string - GCSBucket string - testGridGcsBucket string - LogsDir string - presubmitLogsDir string - testAccount string - nightlyAccount string - releaseAccount string - prowTestsDockerImage string - presubmitScript string - releaseScript string - webhookAPICoverageScript string - upgradeReleaseBranches bool - githubTokenPath string - - // ######################################################################### - // ############## data used for generating prow configuration ############## - // ######################################################################### - // Array constants used throughout the jobs. - allPresubmitTests = []string{"--all-tests"} - releaseNightly = []string{"--publish", "--tag-release"} - releaseLocal = []string{"--nopublish", "--notag-release"} - - // Overrides and behavior changes through command-line flags. - repositoryOverride string - jobNameFilter string - preCommand string - extraEnvVars stringArrayFlag - timeoutOverride int - - // List of Knative repositories. - // Not guaranteed unique by any value of the struct - repositories []repositoryData - - // Map which sections of the config.yaml were written to stdout. - sectionMap map[string]bool - - releaseRegex = regexp.MustCompile(`.+-[0-9\.]+$`) -) - -// Yaml parsing helpers. - -// read template yaml file content -func readTemplate(fp string) string { - if _, ok := templatesCache[fp]; !ok { - // get the directory of the currently running file - _, f, _, _ := runtime.Caller(0) - content, err := ioutil.ReadFile(path.Join(path.Dir(f), templateDir, fp)) - if err != nil { - logFatalf("Failed read file '%s': '%v'", fp, err) - } - templatesCache[fp] = string(content) - } - return templatesCache[fp] -} - -// Config generation functions. - -// newbaseProwJobTemplateData returns a baseProwJobTemplateData type with its initial, default values. -func newbaseProwJobTemplateData(repo string) baseProwJobTemplateData { - var data baseProwJobTemplateData - data.Timeout = 50 - data.OrgName = strings.Split(repo, "/")[0] - data.RepoName = strings.Replace(repo, data.OrgName+"/", "", 1) - data.ExtraRefs = []string{"- org: " + data.OrgName, " repo: " + data.RepoName} - if pathAliasOrgs.Has(data.OrgName) && !nonPathAliasRepos.Has(repo) { - data.PathAlias = "path_alias: knative.dev/" + data.RepoName - data.ExtraRefs = append(data.ExtraRefs, " "+data.PathAlias) - } - data.RepoNameForJob = strings.ToLower(strings.Replace(repo, "/", "-", -1)) - - data.RepoBranch = "main" // Default to be main for other repos - data.GcsBucket = GCSBucket - data.RepoURI = "github.com/" + repo - data.CloneURI = fmt.Sprintf("\"https://%s.git\"", data.RepoURI) - data.GcsLogDir = fmt.Sprintf("gs://%s/%s", GCSBucket, LogsDir) - data.GcsPresubmitLogDir = fmt.Sprintf("gs://%s/%s", GCSBucket, presubmitLogsDir) - data.ReleaseGcs = strings.Replace(repo, data.OrgName+"/", "knative-releases/", 1) - data.AlwaysRun = true - data.Optional = false - data.Image = prowTestsDockerImage - data.ServiceAccount = testAccount - data.Command = "" - data.Args = make([]string, 0) - data.Volumes = make([]string, 0) - data.VolumeMounts = make([]string, 0) - data.Env = make([]string, 0) - data.Labels = make([]string, 0) - data.Annotations = make([]string, 0) - data.Cluster = "cluster: \"build-knative\"" - return data -} - -// General helpers. - -// createCommand returns an array with the command to run and its arguments. 
-func createCommand(data baseProwJobTemplateData) []string { - c := []string{data.Command} - // Prefix the pre-command if present. - if preCommand != "" { - c = append([]string{preCommand}, c...) - } - return append(c, data.Args...) -} - -func envNameToKey(key string) string { - return "- name: " + key -} - -func envValueToValue(value string) string { - return " value: " + value -} - -// addEnvToJob adds the given key/value environment variable pair to the job. -func (data *baseProwJobTemplateData) addEnvToJob(key, value string) { - // Value should always be a string. Add quotes if we get a number - if isNum(value) { - value = "\"" + value + "\"" - } - - data.Env = append(data.Env, envNameToKey(key), envValueToValue(value)) -} - -// addLabelToJob adds extra labels to a job -func addLabelToJob(data *baseProwJobTemplateData, key, value string) { - (*data).Labels = append((*data).Labels, []string{key + ": " + value}...) -} - -// addMonitoringPubsubLabelsToJob adds the pubsub labels so the prow job message will be picked up by test-infra monitoring -func addMonitoringPubsubLabelsToJob(data *baseProwJobTemplateData, runID string) { - addLabelToJob(data, "prow.k8s.io/pubsub.project", "knative-tests") - addLabelToJob(data, "prow.k8s.io/pubsub.topic", "knative-monitoring") - addLabelToJob(data, "prow.k8s.io/pubsub.runID", runID) -} - -// addVolumeToJob adds the given mount path as a volume for the job. -func addVolumeToJob(data *baseProwJobTemplateData, mountPath, name string, isSecret bool, content []string) { - (*data).VolumeMounts = append((*data).VolumeMounts, []string{"- name: " + name, " mountPath: " + mountPath}...) - if isSecret { - (*data).VolumeMounts = append((*data).VolumeMounts, " readOnly: true") - } - s := []string{"- name: " + name} - if isSecret { - arr := []string{" secret:", " secretName: " + name} - s = append(s, arr...) - } - for _, line := range content { - s = append(s, " "+line) - } - (*data).Volumes = append((*data).Volumes, s...) -} - -// configureServiceAccountForJob adds the necessary volumes for the service account for the job. -func configureServiceAccountForJob(data *baseProwJobTemplateData) { - if data.ServiceAccount == "" { - return - } - p := strings.Split(data.ServiceAccount, "/") - if len(p) != 4 || p[0] != "" || p[1] != "etc" || p[3] != "service-account.json" { - logFatalf("Service account path %q is expected to be \"/etc//service-account.json\"", data.ServiceAccount) - } - name := p[2] - addVolumeToJob(data, "/etc/"+name, name, true, nil) -} - -// addExtraEnvVarsToJob adds extra environment variables to a job. -func addExtraEnvVarsToJob(envVars []string, data *baseProwJobTemplateData) { - for _, env := range envVars { - pair := strings.SplitN(env, "=", 2) - if len(pair) == 2 { - data.addEnvToJob(pair[0], pair[1]) - } else { - logFatalf("Environment variable %q is expected to be \"key=value\"", env) - } - } -} - -// addExtraClusterInfoToJob enables extra cluster information for the given job. -// Information includes: k8s config (config file), registry certificate (registry.crt file) -// and KO_DOCKER_REPO environment variable value (ko-docker-repo file). -func addExtraClusterInfoToJob(cluster yaml.MapSlice, data *baseProwJobTemplateData) { - for _, secret := range cluster { - if secret.Key != "secret" { - return - } - secretName := getString(secret.Value) - // Volume mount for all data.
Kubeconfig should be copied to a read-write directory in the job command - addVolumeToJob(data, "/opt/cluster", secretName, true, []string{" defaultMode: 0600"}) - // KO_DOCKER_REPO env variable mounted from the secret - env := []string{envNameToKey("KO_DOCKER_REPO"), " valueFrom:", " secretKeyRef: ", " name: " + secretName, " key: ko-docker-repo"} - - data.Env = append(data.Env, env...) - } -} - -// setupDockerInDockerForJob enables docker-in-docker for the given job. -func setupDockerInDockerForJob(data *baseProwJobTemplateData) { - // These volumes are required for running the docker command and creating kind clusters. - // Reference: https://github.com/kubernetes-sigs/kind/issues/303 - addVolumeToJob(data, "/docker-graph", "docker-graph", false, []string{"emptyDir: {}"}) - addVolumeToJob(data, "/lib/modules", "modules", false, []string{"hostPath:", " path: /lib/modules", " type: Directory"}) - addVolumeToJob(data, "/sys/fs/cgroup", "cgroup", false, []string{"hostPath:", " path: /sys/fs/cgroup", " type: Directory"}) - data.addEnvToJob("DOCKER_IN_DOCKER_ENABLED", "\"true\"") - (*data).SecurityContext = []string{"privileged: true"} -} - -// setResourcesReqForJob sets the resource requirements for the job -func setResourcesReqForJob(res yaml.MapSlice, data *baseProwJobTemplateData) { - data.Resources = nil - for _, val := range res { - data.Resources = append(data.Resources, fmt.Sprintf(" %s:", getString(val.Key))) - for _, item := range getMapSlice(val.Value) { - data.Resources = append(data.Resources, fmt.Sprintf(" %s: %s", getString(item.Key), getString(item.Value))) - } - } -} - -// setReporterConfigReqForJob sets the reporter requirements for the job -func setReporterConfigReqForJob(res yaml.MapSlice, data *baseProwJobTemplateData) { - data.ReporterConfig = nil - for _, val := range res { - data.ReporterConfig = append(data.ReporterConfig, fmt.Sprintf(" %s:", getString(val.Key))) - for _, item := range getMapSlice(val.Value) { - if arr, ok := item.Value.([]interface{}); ok { - data.JobStatesToReport = getStringArray(arr) - } else { - data.ReporterConfig = append(data.ReporterConfig, fmt.Sprintf(" %s: %s", getString(item.Key), getString(item.Value))) - } - } - } -} - -// Config parsers. - -// parseBasicJobConfigOverrides updates the given baseProwJobTemplateData with any base option present in the given config.
-func parseBasicJobConfigOverrides(data *baseProwJobTemplateData, config yaml.MapSlice) {
-	(*data).ExtraRefs = append((*data).ExtraRefs, " base_ref: "+(*data).RepoBranch)
-	for i, item := range config {
-		switch item.Key {
-		case "skip_branches":
-			(*data).SkipBranches = getStringArray(item.Value)
-		case "branches":
-			(*data).Branches = getStringArray(item.Value)
-		case "args":
-			(*data).Args = getStringArray(item.Value)
-		case "timeout":
-			(*data).Timeout = getInt(item.Value)
-		case "command":
-			(*data).Command = getString(item.Value)
-		case "needs-monitor":
-			(*data).NeedsMonitor = getBool(item.Value)
-		case "needs-dind":
-			if getBool(item.Value) {
-				setupDockerInDockerForJob(data)
-			}
-		case "always-run":
-			(*data).AlwaysRun = getBool(item.Value)
-		case "performance":
-			for i, repo := range repositories {
-				if path.Base(repo.Name) == (*data).RepoName {
-					repositories[i].EnablePerformanceTests = getBool(item.Value)
-				}
-			}
-		case "env-vars":
-			addExtraEnvVarsToJob(getStringArray(item.Value), data)
-		case "optional":
-			(*data).Optional = getBool(item.Value)
-		case "resources":
-			setResourcesReqForJob(getMapSlice(item.Value), data)
-		case "reporter_config":
-			setReporterConfigReqForJob(getMapSlice(item.Value), data)
-		case "external_cluster":
-			addExtraClusterInfoToJob(getMapSlice(item.Value), data)
-		case nil: // already processed
-			continue
-		default:
-			logFatalf("Unknown entry %q for job", item.Key)
-		}
-		// Knock out the item, signaling it was already parsed.
-		config[i] = yaml.MapItem{}
-	}
-
-	// Override any values if provided by command-line flags.
-	if timeoutOverride > 0 {
-		(*data).Timeout = timeoutOverride
-	}
-}
-
-// getProwConfigData gets some basic, general data for the Prow config.
-func getProwConfigData(config yaml.MapSlice) prowConfigTemplateData {
-	var data prowConfigTemplateData
-	data.Year = time.Now().Year()
-	data.ProwHost = prowHost
-	data.TestGridHost = testGridHost
-	data.GubernatorHost = gubernatorHost
-	data.GcsBucket = GCSBucket
-	data.TestGridGcsBucket = testGridGcsBucket
-	data.PresubmitLogsDir = presubmitLogsDir
-	data.LogsDir = LogsDir
-	data.TideRepos = make([]string, 0)
-	data.ManagedRepos = make([]string, 0)
-	data.ManagedOrgs = make([]string, 0)
-	// Repos enabled for tide are all those that have presubmit jobs.
-	for _, section := range config {
-		if section.Key != "presubmits" {
-			continue
-		}
-		for _, repo := range getMapSlice(section.Value) {
-			orgRepoName := getString(repo.Key)
-			data.TideRepos = appendIfUnique(data.TideRepos, orgRepoName)
-			if strings.HasSuffix(orgRepoName, "test-infra") {
-				data.TestInfraRepo = orgRepoName
-			}
-		}
-	}
-
-	// Sort repos to make output stable.
-	sort.Strings(data.TideRepos)
-	sort.Strings(data.ManagedOrgs)
-	sort.Strings(data.ManagedRepos)
-	return data
-}
-
-// parseSection generates the configs from a given section of the input yaml file.
-func parseSection(config yaml.MapSlice, title string, generate sectionGenerator, finalize sectionGenerator) {
-	for _, section := range config {
-		if section.Key != title {
-			continue
-		}
-		for _, repo := range getMapSlice(section.Value) {
-			repoName := getString(repo.Key)
-			for _, jobConfig := range getInterfaceArray(repo.Value) {
-				generate(title, repoName, getMapSlice(jobConfig))
-			}
-			if finalize != nil {
-				finalize(title, repoName, nil)
-			}
-		}
-	}
-}
-
-// Template helpers.
-
-// gitHubRepo returns the correct reference for the GitHub repository.
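-// For example (hypothetical values): with RepoURI "github.com/knative/serving" and
-// RepoBranch "release-1.3", this returns "github.com/knative/serving=release-1.3";
-// if repositoryOverride is set, that override is returned as-is.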
-func gitHubRepo(data baseProwJobTemplateData) string {
-	if repositoryOverride != "" {
-		return repositoryOverride
-	}
-	s := data.RepoURI
-	if data.RepoBranch != "" {
-		s += "=" + data.RepoBranch
-	}
-	return s
-}
-
-// executeJobTemplate outputs the given job template with the given data, respecting any filtering.
-func executeJobTemplate(name, templ, title, repoName, jobName string, groupByRepo bool, data interface{}) {
-	if jobNameFilter != "" && jobNameFilter != jobName {
-		return
-	}
-	if !sectionMap[title] {
-		output.outputConfig(title + ":")
-		sectionMap[title] = true
-	}
-	if groupByRepo {
-		if !sectionMap[title+repoName] {
-			output.outputConfig(baseIndent + repoName + ":")
-			sectionMap[title+repoName] = true
-		}
-	}
-	executeTemplate(name, templ, data)
-}
-
-// executeTemplate outputs the given template with the given data.
-func executeTemplate(name, templ string, data interface{}) {
-	var res bytes.Buffer
-	funcMap := template.FuncMap{
-		"indent_section": indentSection,
-		"indent_array_section": indentArraySection,
-		"indent_array": indentArray,
-		"indent_keys": indentKeys,
-		"indent_map": indentMap,
-		"repo": gitHubRepo,
-	}
-	t := template.Must(template.New(name).Funcs(funcMap).Delims("[[", "]]").Parse(templ))
-	if err := t.Execute(&res, data); err != nil {
-		logFatalf("Error in template %s: %v", name, err)
-	}
-	for _, line := range strings.Split(res.String(), "\n") {
-		output.outputConfig(line)
-	}
-}
-
-// Multi-value flag parser.
-
-func (a *stringArrayFlag) String() string {
-	return strings.Join(*a, ", ")
-}
-
-func (a *stringArrayFlag) Set(value string) error {
-	*a = append(*a, value)
-	return nil
-}
-
-// parseJob gets the job data from the original yaml data; the jobName can be "presubmits" or "periodics".
-func parseJob(config yaml.MapSlice, jobName string) yaml.MapSlice {
-	for _, section := range config {
-		if section.Key == jobName {
-			return getMapSlice(section.Value)
-		}
-	}
-
-	logFatalf("The config is missing the %s section, cannot continue.", jobName)
-	return nil
-}
-
-// parseGoCoverageMap constructs a map indicating which repos are enabled for the go coverage check.
-func parseGoCoverageMap(presubmitJob yaml.MapSlice) map[string]bool {
-	goCoverageMap := make(map[string]bool)
-	for _, repo := range presubmitJob {
-		repoName := strings.Split(getString(repo.Key), "/")[1]
-		goCoverageMap[repoName] = false
-		for _, jobConfig := range getInterfaceArray(repo.Value) {
-			for _, item := range getMapSlice(jobConfig) {
-				if item.Key == "go-coverage" {
-					goCoverageMap[repoName] = getBool(item.Value)
-					break
-				}
-			}
-		}
-	}
-
-	return goCoverageMap
-}
-
-// collectMetaData collects the meta data from the original yaml data, which can then be used for building the test groups and dashboards config.
-func collectMetaData(periodicJob yaml.MapSlice) {
-	for _, repo := range periodicJob {
-		rawName := getString(repo.Key)
-		projName := strings.Split(rawName, "/")[0]
-		repoName := strings.Split(rawName, "/")[1]
-		jobDetailMap := metaData.Get(projName)
-		metaData.EnsureRepo(projName, repoName)
-
-		// parse job configs
-		for _, conf := range getInterfaceArray(repo.Value) {
-			jobDetailMap = metaData.Get(projName)
-			jobConfig := getMapSlice(conf)
-			enabled := false
-			jobName := ""
-			releaseVersion := ""
-			for _, item := range jobConfig {
-				switch item.Key {
-				case "continuous", "dot-release", "auto-release", "performance",
-					"nightly", "webhook-apicoverage":
-					if getBool(item.Value) {
-						enabled = true
-						jobName = getString(item.Key)
-					}
-				case "branch-ci":
-					enabled = getBool(item.Value)
-					jobName = "continuous"
-				case "release":
-					releaseVersion = getString(item.Value)
-				case "custom-job":
-					enabled = true
-					jobName = getString(item.Value)
-				default:
-					// continue here since we do not need to care about other entries, like cron, command, etc.
-					continue
-				}
-			}
-			// add job types for the corresponding repos, if needed
-			if enabled {
-				// if it's a job for a release branch
-				if releaseVersion != "" {
-					releaseProjName := fmt.Sprintf("%s-%s", projName, releaseVersion)
-
-					// TODO: Clarify why the release project's detail map replaces the main one here.
-					jobDetailMap = metaData.Get(releaseProjName)
-				}
-				jobDetailMap.Add(repoName, jobName)
-			}
-		}
-		updateTestCoverageJobDataIfNeeded(jobDetailMap, repoName)
-	}
-
-	// add test coverage jobs for the repos that haven't been handled
-	addRemainingTestCoverageJobs()
-}
-
-// updateTestCoverageJobDataIfNeeded adds test-coverage job data for the repo if it has the go coverage check enabled.
-func updateTestCoverageJobDataIfNeeded(jobDetailMap JobDetailMap, repoName string) {
-	if goCoverageMap[repoName] {
-		jobDetailMap.Add(repoName, "test-coverage")
-		// delete this repoName from the goCoverageMap to avoid it being processed again when we
-		// call the function addRemainingTestCoverageJobs
-		delete(goCoverageMap, repoName)
-	}
-}
-
-// addRemainingTestCoverageJobs adds test-coverage jobs data for the repos that haven't been processed.
-func addRemainingTestCoverageJobs() {
-	// handle repos that only have go coverage
-	for repoName, hasGoCoverage := range goCoverageMap {
-		if hasGoCoverage {
-			jobDetailMap := metaData.Get(metaData.projNames[0]) // TODO: Clarify why projNames[0] is always used here.
-			jobDetailMap.Add(repoName, "test-coverage")
-		}
-	}
-}
-
-// buildProjRepoStr builds the projRepoStr used in the config file with projName and repoName.
-func buildProjRepoStr(projName string, repoName string) string {
-	projVersion := ""
-	if releaseRegex.MatchString(projName) {
-		projNameAndVersion := strings.Split(projName, "-")
-		// The project name can possibly contain "-" as well, so we need to consider the last part as the version,
-		// and the rest as the project name.
-		// For example, "knative-sandbox-0.15" will be split into "knative-sandbox" and "0.15".
-		projVersion = projNameAndVersion[len(projNameAndVersion)-1]
-		projName = strings.TrimSuffix(projName, "-"+projVersion)
-	}
-	projRepoStr := repoName
-	if projVersion != "" {
-		projRepoStr += "-" + projVersion
-	}
-	projRepoStr = projName + "-" + projRepoStr
-	return strings.ToLower(projRepoStr)
-}
-
-// isReleased returns true for a project name that has a version suffix.
-func isReleased(projName string) bool {
-	return releaseRegex.FindString(projName) != ""
-}
-
-// setOutput sets the given file as the output target; all subsequent output will be written to this file.
-func setOutput(fileName string) {
-	output = newOutputter(os.Stdout)
-	if fileName == "" {
-		return
-	}
-	configFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
-	if err != nil {
-		logFatalf("Cannot create the configuration file %q: %v", fileName, err)
-		return
-	}
-	configFile.Truncate(0)
-	configFile.Seek(0, 0)
-	output = newOutputter(configFile)
-}
-
-// main is the script entry point.
-func main() {
-	logFatalf = log.Fatalf
-	// Parse flags and check them.
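-	// An illustrative invocation (file names are hypothetical; the flags are defined below):
-	//
-	//   config-generator \
-	//     --prow-jobs-config-output prow/jobs.yaml \
-	//     --testgrid-config-output testgrid/testgrid.yaml \
-	//     config_knative.yaml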
-	prowJobsConfigOutput := ""
-	testgridConfigOutput := ""
-	k8sTestgridConfigOutput := ""
-	var generateTestgridConfig = flag.Bool("generate-testgrid-config", true, "Whether to generate the testgrid config from the template file")
-	var generateK8sTestgridConfig = flag.Bool("generate-k8s-testgrid-config", true, "Whether to generate the k8s testgrid config from the template file")
-	var includeConfig = flag.Bool("include-config", true, "Whether to include general configuration (e.g., plank) in the generated config")
-	var dockerImagesBase = flag.String("image-docker", "gcr.io/knative-tests/test-infra", "Default registry for the docker images used by the jobs")
-	flag.StringVar(&prowJobsConfigOutput, "prow-jobs-config-output", "", "The destination for the prow jobs config output, defaults to stdout")
-	flag.StringVar(&testgridConfigOutput, "testgrid-config-output", "", "The destination for the testgrid config output, defaults to stdout")
-	flag.StringVar(&k8sTestgridConfigOutput, "k8s-testgrid-config-output", "", "The destination for the k8s testgrid config output, defaults to stdout")
-	flag.StringVar(&prowHost, "prow-host", "https://prow.knative.dev", "Prow host, including HTTP protocol")
-	flag.StringVar(&testGridHost, "testgrid-host", "https://testgrid.knative.dev", "TestGrid host, including HTTP protocol")
-	flag.StringVar(&gubernatorHost, "gubernator-host", "https://gubernator.knative.dev", "Gubernator host, including HTTP protocol")
-	flag.StringVar(&GCSBucket, "gcs-bucket", "knative-prow", "GCS bucket to upload the logs to")
-	flag.StringVar(&testGridGcsBucket, "testgrid-gcs-bucket", "knative-testgrid", "TestGrid GCS bucket")
-	flag.StringVar(&LogsDir, "logs-dir", "logs", "Path in the GCS bucket to upload logs of periodic and post-submit jobs")
-	flag.StringVar(&presubmitLogsDir, "presubmit-logs-dir", "pr-logs", "Path in the GCS bucket to upload logs of pre-submit jobs")
-	flag.StringVar(&testAccount, "test-account", "/etc/test-account/service-account.json", "Path to the service account JSON for test jobs")
-	flag.StringVar(&nightlyAccount, "nightly-account", "/etc/nightly-account/service-account.json", "Path to the service account JSON for nightly release jobs")
-	flag.StringVar(&releaseAccount, "release-account", "/etc/release-account/service-account.json", "Path to the service account JSON for release jobs")
-	var prowTestsDockerImageName = flag.String("prow-tests-docker", "prow-tests:stable", "prow-tests docker image")
-	flag.StringVar(&presubmitScript, "presubmit-script", "./test/presubmit-tests.sh", "Executable for running presubmit tests")
-	flag.StringVar(&releaseScript, "release-script", "./hack/release.sh", "Executable for creating releases")
-	flag.StringVar(&webhookAPICoverageScript, "webhook-api-coverage-script", "./test/apicoverage.sh", "Executable for running the webhook apicoverage tool")
-	flag.StringVar(&repositoryOverride, "repo-override", "", "Repository path (github.com/foo/bar[=branch]) to use instead for a job")
-	flag.IntVar(&timeoutOverride, "timeout-override", 0, "Timeout (in minutes) to use instead for a job")
-	flag.StringVar(&jobNameFilter, "job-filter", "", "Generate only this job, instead of all jobs")
-	flag.StringVar(&preCommand, "pre-command", "", "Executable for running instead of the real command of a job")
-	flag.BoolVar(&upgradeReleaseBranches, "upgrade-release-branches", false, "Update release branch jobs based on active branches")
-	flag.StringVar(&githubTokenPath, "github-token-path", "", "Token path for authenticating with github, used only when --upgrade-release-branches is on")
-	flag.Var(&extraEnvVars, "extra-env", "Extra environment variables (key=value) to add to a job")
-	flag.Parse()
-	if len(flag.Args()) != 1 {
-		log.Fatal("Pass the config file as a parameter")
-	}
-
-	prowTestsDockerImage = path.Join(*dockerImagesBase, *prowTestsDockerImageName)
-
-	// We use MapSlice instead of maps to keep key order and create predictable output.
-	configYaml := yaml.MapSlice{}
-
-	// Read input config.
-	configFileName := flag.Arg(0)
-	if upgradeReleaseBranches {
-		gc, err := ghutil.NewGithubClient(githubTokenPath)
-		if err != nil {
-			logFatalf("Failed creating github client from %q: %v", githubTokenPath, err)
-		}
-		if err := upgradeReleaseBranchesTemplate(configFileName, gc); err != nil {
-			logFatalf("Failed to upgrade based on release branches: %v", err)
-		}
-	}
-
-	configFileContent, err := ioutil.ReadFile(configFileName)
-	if err != nil {
-		logFatalf("Cannot read file %q: %v", configFileName, err)
-	}
-	if err = yaml.Unmarshal(configFileContent, &configYaml); err != nil {
-		logFatalf("Cannot parse config %q: %v", configFileName, err)
-	}
-
-	prowConfigData := getProwConfigData(configYaml)
-
-	// Generate Prow config.
-	repositories = make([]repositoryData, 0)
-	sectionMap = make(map[string]bool)
-	setOutput(prowJobsConfigOutput)
-	executeTemplate("general header", readTemplate(commonHeaderConfig), prowConfigData)
-	parseSection(configYaml, "presubmits", generatePresubmit, nil)
-	parseSection(configYaml, "periodics", generatePeriodic, generateGoCoveragePeriodic)
-	for _, repo := range repositories { // Keep order for predictable output.
-		if !repo.Processed && repo.EnableGoCoverage {
-			generateGoCoveragePeriodic("periodics", repo.Name, nil)
-		}
-	}
-	generatePerfClusterUpdatePeriodicJobs()
-
-	for _, repo := range repositories {
-		if repo.EnableGoCoverage {
-			generateGoCoveragePostsubmit("postsubmits", repo.Name, nil)
-		}
-		if repo.EnablePerformanceTests {
-			generatePerfClusterPostsubmitJob(repo)
-		}
-	}
-
-	// The config object is modified when we generate the Prow config, so we need to reload it here.
-	if err = yaml.Unmarshal(configFileContent, &configYaml); err != nil {
-		logFatalf("Cannot parse config %q: %v", configFileName, err)
-	}
-
-	// Generate Testgrid config.
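-	// Both Testgrid configs below are derived from the same input YAML: the presubmits
-	// section feeds the go-coverage map, and the periodics section feeds the dashboard
-	// metadata (see parseGoCoverageMap and collectMetaData).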
- if *generateTestgridConfig { - setOutput(testgridConfigOutput) - - if *includeConfig { - executeTemplate("general header", readTemplate(commonHeaderConfig), newBaseTestgridTemplateData("")) - executeTemplate("general config", readTemplate(generalTestgridConfig), newBaseTestgridTemplateData("")) - } - - presubmitJobData := parseJob(configYaml, "presubmits") - goCoverageMap = parseGoCoverageMap(presubmitJobData) - - periodicJobData := parseJob(configYaml, "periodics") - collectMetaData(periodicJobData) - addCustomJobsTestgrid() - - // log.Print(spew.Sdump(metaData)) - - // These generate "test_groups:" - metaData.generateTestGridSection("test_groups", generateTestGroup, false) - metaData.generateNonAlignedTestGroups() - - // These generate "dashboards:" - metaData.generateTestGridSection("dashboards", generateDashboard, true) - metaData.generateDashboardsForReleases() - metaData.generateNonAlignedDashboards() - - // These generate "dashboard_groups:" - metaData.generateDashboardGroups() - metaData.generateNonAlignedDashboardGroups() - } - - if *generateK8sTestgridConfig { - setOutput(k8sTestgridConfigOutput) - executeTemplate("general header", readTemplate(commonHeaderConfig), newBaseTestgridTemplateData("")) - generateK8sTestgrid(metaData) - } -} - -// parseOrgAndRepoFromMapItem splits the "org/repo" string of a yaml.MapItem -// into "org" and "repo" return values. -func parseOrgAndRepoFromMapItem(mapItem yaml.MapItem) (string, string) { - orgAndRepo := strings.Split(mapItem.Key.(string), "/") - org := orgAndRepo[0] - repo := orgAndRepo[1] - return org, repo -} diff --git a/tools/config-generator/main_test.go b/tools/config-generator/main_test.go deleted file mode 100644 index 71caf41572e..00000000000 --- a/tools/config-generator/main_test.go +++ /dev/null @@ -1,974 +0,0 @@ -/* -Copyright 2020 The Knative Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "bytes" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "gopkg.in/yaml.v2" -) - -func TestNewOutputter(t *testing.T) { - SetupForTesting() - out := newOutputter(&bytes.Buffer{}) - if out.count != 0 { - t.Fatalf("Count should be 0, was %v", out.count) - } -} - -func TestOutputConfig(t *testing.T) { - SetupForTesting() - output.outputConfig("") - if diff := cmp.Diff(GetOutput(), ""); diff != "" { - t.Fatalf("Incorrect output for empty string: (-got +want)\n%s", diff) - } - - output.outputConfig(" \t\n") - if diff := cmp.Diff(GetOutput(), ""); diff != "" { - t.Fatalf("Incorrect output for whitespace string: (-got +want)\n%s", diff) - } - if output.count != 0 { - t.Fatalf("Output count should have been 0, but was %d", output.count) - } - - inputLine := "some-key: some-value" - output.outputConfig(inputLine) - if diff := cmp.Diff(GetOutput(), inputLine+"\n"); diff != "" { - t.Fatalf("Incorrect output for whitespace string: (-got +want)\n%s", diff) - } - if output.count != 1 { - t.Fatalf("Output count should have been exactly 1, but was %d", output.count) - } -} - -func TestReadTemplate(t *testing.T) { - SetupForTesting() - templatesCache["foo"] = "bar" - if diff := cmp.Diff(readTemplate("foo"), "bar"); diff != "" { - t.Fatalf("Cached template was not returned: (-got +want)\n%s", diff) - } - - readTemplate("non/existent/file/path") - if logFatalCalls != 1 { - t.Fatalf("Non existent file should have caused error") - } - - delete(templatesCache, "foo") -} - -func TestNewbaseProwJobTemplateData(t *testing.T) { - SetupForTesting() - out := newbaseProwJobTemplateData("foo/subrepo") - if diff := cmp.Diff(out.PathAlias, ""); diff != "" { - t.Fatalf("Unexpected path alias: (-got +want)\n%s", diff) - } - - pathAliasOrgs.Insert("foo") - out = newbaseProwJobTemplateData("foo/subrepo") - expected := "path_alias: knative.dev/subrepo" - if diff := cmp.Diff(out.PathAlias, expected); diff != "" { - t.Fatalf("Unexpected path alias: (-got +want)\n%s", diff) - } - - nonPathAliasRepos.Insert("foo/subrepo") - out = newbaseProwJobTemplateData("foo/subrepo") - if diff := cmp.Diff(out.PathAlias, ""); diff != "" { - t.Fatalf("Unexpected path alias: (-got +want)\n%s", diff) - } - - // don't pollute the global setup - pathAliasOrgs.Delete("foo") - nonPathAliasRepos.Delete("foo/subrepo") -} - -func TestCreateCommand(t *testing.T) { - SetupForTesting() - preCommand = "" // global - in := baseProwJobTemplateData{Command: "foo", Args: []string{"bar", "baz"}} - out := createCommand(in) - expected := []string{"foo", "bar", "baz"} - if diff := cmp.Diff(out, expected); diff != "" { - t.Fatalf("Unexpected command & args list: (-got +want)\n%s", diff) - } - - preCommand = "expelliarmus" - out = createCommand(in) - expected = []string{"expelliarmus", "foo", "bar", "baz"} - if diff := cmp.Diff(out, expected); diff != "" { - t.Fatalf("Unexpected command & args list: (-got +want)\n%s", diff) - } - - preCommand = "" -} - -func TestEnvNameToKey(t *testing.T) { - SetupForTesting() - if diff := cmp.Diff(envNameToKey("foo"), "- name: foo"); diff != "" { - t.Fatalf("Unexpected name to key conversion: (-got +want)\n%s", diff) - } -} - -func TestEnvValueToValue(t *testing.T) { - SetupForTesting() - if diff := cmp.Diff(envValueToValue("bar"), " value: bar"); diff != "" { - t.Fatalf("Unexpected env value conversion: (-got +want)\n%s", diff) - } -} - -func TestAddEnvToJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - job.addEnvToJob("foo", "bar") - if diff := 
cmp.Diff(job.Env[0], "- name: foo"); diff != "" { - t.Fatalf("Unexpected env name: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(job.Env[1], " value: bar"); diff != "" { - t.Fatalf("Unexpected env value: (-got +want)\n%s", diff) - } - - job = baseProwJobTemplateData{} - job.addEnvToJob("num", "42") - if diff := cmp.Diff(job.Env[0], "- name: num"); diff != "" { - t.Fatalf("Unexpected env name: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(job.Env[1], " value: \"42\""); diff != "" { - t.Fatalf("Unexpected env value: (-got +want)\n%s", diff) - } -} - -func TestAddLabelToJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - addLabelToJob(&job, "foo", "bar") - - expected := []string{"foo: bar"} - if diff := cmp.Diff(job.Labels, expected); diff != "" { - t.Fatalf("Unexpected label string: (-got +want)\n%s", diff) - } -} - -func TestAddMonitoringPubsubLabelsToJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - addMonitoringPubsubLabelsToJob(&job, "foobar") - expected := []string{ - "prow.k8s.io/pubsub.project: knative-tests", - "prow.k8s.io/pubsub.topic: knative-monitoring", - "prow.k8s.io/pubsub.runID: foobar", - } - if diff := cmp.Diff(job.Labels, expected); diff != "" { - t.Fatalf("Unexpected pubsub label: (-got +want)\n%s", diff) - } -} - -func TestAddVolumeToJob(t *testing.T) { - SetupForTesting() - mountPath := "somePath" - name := "foo" - content := []string{"bar", "baz"} - - job := baseProwJobTemplateData{} - isSecret := false - addVolumeToJob(&job, mountPath, name, isSecret, content) - expectedVolumeMounts := []string{ - "- name: foo", - " mountPath: somePath", - } - if diff := cmp.Diff(job.VolumeMounts, expectedVolumeMounts); diff != "" { - t.Fatalf("Unexpected volume mount: (-got +want)\n%s", diff) - } - expectedVolumes := []string{ - "- name: foo", - " bar", - " baz", - } - for i := range expectedVolumes { - if diff := cmp.Diff(job.Volumes[i], expectedVolumes[i]); diff != "" { - t.Fatalf("Unexpected volume: (-got +want)\n%s", diff) - } - } - - job = baseProwJobTemplateData{} - isSecret = true - addVolumeToJob(&job, mountPath, name, isSecret, content) - expectedVolumeMounts = []string{ - "- name: foo", - " mountPath: somePath", - " readOnly: true", - } - if diff := cmp.Diff(job.VolumeMounts, expectedVolumeMounts); diff != "" { - t.Fatalf("Unexpected volume mount: (-got +want)\n%s", diff) - } - expectedVolumes = []string{ - "- name: foo", - " secret:", - " secretName: foo", - " bar", - " baz", - } - if diff := cmp.Diff(job.Volumes, expectedVolumes); diff != "" { - t.Fatalf("Unexpected volume: (-got +want)\n%s", diff) - } -} - -func TestConfigureServiceAccountForJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{ServiceAccount: ""} - configureServiceAccountForJob(&job) - if logFatalCalls != 0 || len(job.Volumes) != 0 { - t.Fatalf("Service Account was not specified, but action was performed") - } - - badAccounts := []string{ - "/etc/foo/service-account.json/bar", - "foo/etc/bar/service-account.json", - "/foo/bar/service-account.json", - "/etc/foo/some-other-account.json", - } - for _, acct := range badAccounts { - job = baseProwJobTemplateData{ServiceAccount: acct} - configureServiceAccountForJob(&job) - if logFatalCalls != 1 { - t.Fatalf("Service account %v did not cause error", acct) - } - logFatalCalls = 0 - } - - job = baseProwJobTemplateData{ServiceAccount: "/etc/foo/service-account.json"} - configureServiceAccountForJob(&job) - expectedVolumeMounts := []string{ - "- name: foo", - " mountPath: /etc/foo", 
- " readOnly: true", - } - if diff := cmp.Diff(job.VolumeMounts, expectedVolumeMounts); diff != "" { - t.Fatalf("Unexpected volume mount: (-got +want)\n%s", diff) - } - expectedVolumes := []string{ - "- name: foo", - " secret:", - " secretName: foo", - } - if diff := cmp.Diff(job.Volumes, expectedVolumes); diff != "" { - t.Fatalf("Unexpected volume: (-got +want)\n%s", diff) - } -} - -func TestAddExtraEnvVarsToJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - - in := []string{"foo=bar"} - addExtraEnvVarsToJob(in, &job) - if diff := cmp.Diff(job.Env[0], "- name: foo"); diff != "" { - t.Fatalf("Unexpected env name: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(job.Env[1], " value: bar"); diff != "" { - t.Fatalf("Unexpected env value: (-got +want)\n%s", diff) - } - - in = []string{"foobar"} - addExtraEnvVarsToJob(in, &job) - if logFatalCalls != 1 { - t.Fatalf("Invalid string 'foobar' should have caused error") - } -} - -func TestAddExtraClusterInfoToJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - in := yaml.MapSlice{ - yaml.MapItem{Key: "secret", Value: "foo"}, - } - - addExtraClusterInfoToJob(in, &job) - - expectedVolumeMounts := []string{ - "- name: foo", - " mountPath: /opt/cluster", - " readOnly: true", - } - - if diff := cmp.Diff(job.VolumeMounts, expectedVolumeMounts); diff != "" { - t.Fatalf("Unexpected volume mount: (-got +want)\n%s", diff) - } - - expectedVolumes := []string{ - "- name: foo", - " secret:", - " secretName: foo", - " defaultMode: 0600", - } - if diff := cmp.Diff(job.Volumes, expectedVolumes); diff != "" { - t.Fatalf("Unexpected volume: (-got +want)\n%s", diff) - } - - if diff := cmp.Diff(job.Env[0], "- name: KO_DOCKER_REPO"); diff != "" { - t.Fatalf("Unexpected env value: (-got +want)\n%s", diff) - } - - in = yaml.MapSlice{ - yaml.MapItem{Key: "not-secret", Value: "foo"}, - } - - addExtraClusterInfoToJob(in, &job) - if diff := cmp.Diff(job.Volumes, expectedVolumes); diff != "" { - t.Fatalf("Unexpected new volume was added: (-got +want)\n%s", diff) - } -} - -func TestSetupDockerInDockerForJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - setupDockerInDockerForJob(&job) - if len(job.Volumes) == 0 || len(job.VolumeMounts) == 0 { - t.Fatalf("Docker in Docker setup did not create volumes and/or mounts") - } - if len(job.Env) == 0 || len(job.SecurityContext) == 0 { - t.Fatalf("Docker in Docker setup did not add env and/or set security context") - } -} - -func TestSetResourcesReqForJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - requests := yaml.MapSlice{ - yaml.MapItem{Key: "memory", Value: "12Gi"}, - yaml.MapItem{Key: "disk", Value: "12Ti"}, - } - limits := yaml.MapSlice{ - yaml.MapItem{Key: "memory", Value: "16Gi"}, - yaml.MapItem{Key: "disk", Value: "16Ti"}, - } - resources := yaml.MapSlice{ - yaml.MapItem{Key: "requests", Value: requests}, - yaml.MapItem{Key: "limits", Value: limits}, - } - setResourcesReqForJob(resources, &job) - expectedResources := []string{ - " requests:", - " memory: 12Gi", - " disk: 12Ti", - " limits:", - " memory: 16Gi", - " disk: 16Ti", - } - if diff := cmp.Diff(job.Resources, expectedResources); diff != "" { - t.Fatalf("Unexpected volume mount: (-got +want)\n%s", diff) - } -} - -func TestSetReporterConfigReqForJob(t *testing.T) { - SetupForTesting() - job := baseProwJobTemplateData{} - slack := yaml.MapSlice{ - yaml.MapItem{Key: "channel", Value: "serving-api"}, - yaml.MapItem{Key: "report_template", Value: "Report Template"}, - 
yaml.MapItem{Key: "foo", Value: []interface{}{"bar", "baz"}}, - } - resources := yaml.MapSlice{ - yaml.MapItem{Key: "slack", Value: slack}, - } - setReporterConfigReqForJob(resources, &job) - - expectedConfig := []string{ - " slack:", - " channel: serving-api", - " report_template: Report Template", - } - if diff := cmp.Diff(job.ReporterConfig, expectedConfig); diff != "" { - t.Fatalf("Unexpected reporter config: (-got +want)\n%s", diff) - } - expectedJobStates := []string{"bar", "baz"} - if diff := cmp.Diff(job.JobStatesToReport, expectedJobStates); diff != "" { - t.Fatalf("Unexpected job states: (-got +want)\n%s", diff) - } -} - -func TestParseBasicJobConfigOverrides(t *testing.T) { - SetupForTesting() - requests := yaml.MapSlice{ - yaml.MapItem{Key: "memory", Value: "12Gi"}, - yaml.MapItem{Key: "disk", Value: "12Ti"}, - } - limits := yaml.MapSlice{ - yaml.MapItem{Key: "memory", Value: "16Gi"}, - yaml.MapItem{Key: "disk", Value: "16Ti"}, - } - resources := yaml.MapSlice{ - yaml.MapItem{Key: "requests", Value: requests}, - yaml.MapItem{Key: "limits", Value: limits}, - } - slack := yaml.MapSlice{ - yaml.MapItem{Key: "channel", Value: "serving-api"}, - yaml.MapItem{Key: "report_template", Value: "Report Template"}, - yaml.MapItem{Key: "foo", Value: []interface{}{"bar", "baz"}}, - } - reporterConfig := yaml.MapSlice{ - yaml.MapItem{Key: "slack", Value: slack}, - } - cluster := yaml.MapSlice{ - yaml.MapItem{Key: "secret", Value: "foo"}, - } - - repoName := "foo_repo" - repositories = []repositoryData{ - {Name: repoName, EnablePerformanceTests: false}, - } - - job := baseProwJobTemplateData{RepoBranch: "my_repo_branch", RepoName: repoName} - config := yaml.MapSlice{ - yaml.MapItem{Key: "skip_branches", Value: []interface{}{"skip", "branches"}}, - yaml.MapItem{Key: "branches", Value: []interface{}{"branch1", "branch2"}}, - yaml.MapItem{Key: "args", Value: []interface{}{"arg1", "arg2"}}, - yaml.MapItem{Key: "timeout", Value: 42}, - yaml.MapItem{Key: "command", Value: "foo_command"}, - yaml.MapItem{Key: "needs-monitor", Value: true}, - yaml.MapItem{Key: "needs-dind", Value: true}, - yaml.MapItem{Key: "always-run", Value: true}, - yaml.MapItem{Key: "performance", Value: true}, - yaml.MapItem{Key: "env-vars", Value: []interface{}{"foo=bar"}}, - yaml.MapItem{Key: "optional", Value: true}, - yaml.MapItem{Key: "resources", Value: resources}, - yaml.MapItem{Key: "external_cluster", Value: cluster}, - yaml.MapItem{Key: "reporter_config", Value: reporterConfig}, - } - - parseBasicJobConfigOverrides(&job, config) - - expected := []string{" base_ref: my_repo_branch"} - if diff := cmp.Diff(job.ExtraRefs, expected); diff != "" { - t.Fatalf("Unexpected base ref: (-got +want)\n%s", diff) - } - expected = []string{"skip", "branches"} - if diff := cmp.Diff(job.SkipBranches, expected); diff != "" { - t.Fatalf("Unexpected skip branches: (-got +want)\n%s", diff) - } - expected = []string{"branch1", "branch2"} - if diff := cmp.Diff(job.Branches, expected); diff != "" { - t.Fatalf("Unexpected branches: (-got +want)\n%s", diff) - } - expected = []string{"arg1", "arg2"} - if diff := cmp.Diff(job.Args, expected); diff != "" { - t.Fatalf("Unexpected args: (-got +want)\n%s", diff) - } - if job.Timeout != 42 { - t.Fatalf("Unexpected timeout: %v", job.Timeout) - } - if diff := cmp.Diff(job.Command, "foo_command"); diff != "" { - t.Fatalf("Unexpected command: (-got +want)\n%s", diff) - } - if !job.NeedsMonitor { - t.Fatalf("Expected job.NeedsMonitor to be true") - } - if len(job.Volumes) == 0 || len(job.VolumeMounts) == 0 || 
len(job.SecurityContext) == 0 { - t.Fatalf("Error in Docker in Docker setup") - } - if !job.AlwaysRun { - t.Fatalf("Expected job.AlwaysRun to be true") - } - if !job.Optional { - t.Fatalf("Expected job.Optional to be true") - } - if !repositories[0].EnablePerformanceTests { - t.Fatalf("Repository performance test should have been enabled") - } - // Note that the first 2 Env variables are from the Docker in Docker setup - if diff := cmp.Diff(job.Env[2], "- name: foo"); diff != "" { - t.Fatalf("Unexpected env name: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(job.Env[3], " value: bar"); diff != "" { - t.Fatalf("Unexpected env value: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(job.Env[4], "- name: KO_DOCKER_REPO"); diff != "" { - t.Fatalf("Unexpected env value: (-got +want)\n%s", diff) - } - expectedResources := []string{ - " requests:", - " memory: 12Gi", - " disk: 12Ti", - " limits:", - " memory: 16Gi", - " disk: 16Ti", - } - if diff := cmp.Diff(job.Resources, expectedResources); diff != "" { - t.Fatalf("Unexpected resources (-got +want)\n%s", diff) - } - - expectedVolumeMounts := []string{ - "- name: docker-graph", - " mountPath: /docker-graph", - "- name: modules", - " mountPath: /lib/modules", - "- name: cgroup", - " mountPath: /sys/fs/cgroup", - "- name: foo", - " mountPath: /opt/cluster", - " readOnly: true", - } - if diff := cmp.Diff(job.VolumeMounts, expectedVolumeMounts); diff != "" { - t.Fatalf("Unexpected volume mounts: (-got +want)\n%s", diff) - } - expectedVolumes := []string{ - "- name: docker-graph", - " emptyDir: {}", - "- name: modules", - " hostPath:", - " path: /lib/modules", - " type: Directory", - "- name: cgroup", - " hostPath:", - " path: /sys/fs/cgroup", - " type: Directory", - "- name: foo", - " secret:", - " secretName: foo", - " defaultMode: 0600", - } - if diff := cmp.Diff(job.Volumes, expectedVolumes); diff != "" { - t.Fatalf("Unexpected volumes: (-got +want)\n%s", diff) - } - - expectedReporterConfig := []string{ - " slack:", - " channel: serving-api", - " report_template: Report Template", - } - if diff := cmp.Diff(job.ReporterConfig, expectedReporterConfig); diff != "" { - t.Fatalf("Unexpected reporter config: (-got +want)\n%s", diff) - } - expectedJobStates := []string{"bar", "baz"} - if diff := cmp.Diff(job.JobStatesToReport, expectedJobStates); diff != "" { - t.Fatalf("Unexpected job states: (-got +want)\n%s", diff) - } - - timeoutOverride = 999 - parseBasicJobConfigOverrides(&job, config) - if job.Timeout != 999 { - t.Fatalf("Timeout override did not work") - } -} - -func TestGetProwConfigData(t *testing.T) { - SetupForTesting() - presubmits := yaml.MapSlice{ - yaml.MapItem{Key: "foo-repo"}, - yaml.MapItem{Key: "bar-repo"}, - yaml.MapItem{Key: "bar-repo-test-infra"}, - yaml.MapItem{Key: "dup-repo"}, - yaml.MapItem{Key: "dup-repo"}, - } - config := yaml.MapSlice{ - yaml.MapItem{Key: "presubmits", Value: presubmits}, - yaml.MapItem{Key: "ignored-section"}, - } - - out := getProwConfigData(config) - - expectedRepos := []string{"bar-repo", "bar-repo-test-infra", "dup-repo", "foo-repo"} - if diff := cmp.Diff(out.TideRepos, expectedRepos); diff != "" { - t.Fatalf("Unexpected TideRepos: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(out.TestInfraRepo, "bar-repo-test-infra"); diff != "" { - t.Fatalf("Unexpected test-infra repo: (-got +want)\n%s", diff) - } -} -func TestParseSection(t *testing.T) { - SetupForTesting() - generated := []string{} - generate := func(a, b string, s yaml.MapSlice) { - for _, v := range s { - generated = append(generated, 
fmt.Sprintf("%v, %v, %v, %v", a, b, v.Key, v.Value)) - } - } - finalized := []string{} - finalize := func(a, b string, s yaml.MapSlice) { - finalized = append(finalized, fmt.Sprintf("%v, %v", a, b)) - } - title := "pet-store" - dogs := []interface{}{ - yaml.MapSlice{ - yaml.MapItem{Key: "Spot", Value: "Dalmatian"}, - yaml.MapItem{Key: "Fido", Value: "Terrier"}, - }, - yaml.MapSlice{ - yaml.MapItem{Key: "Remy", Value: "Retriever"}, - }, - } - cats := []interface{}{ - yaml.MapSlice{ - yaml.MapItem{Key: "Whiskers", Value: "Calico"}, - yaml.MapItem{Key: "Twitch", Value: "Siamese"}, - }, - } - config := yaml.MapSlice{ - yaml.MapItem{Key: "pet-store", Value: yaml.MapSlice{ - yaml.MapItem{Key: "dogs", Value: dogs}, - yaml.MapItem{Key: "cats", Value: cats}, - }}, - yaml.MapItem{Key: "toy-store"}, - } - parseSection(config, title, generate, finalize) - - expected := []string{ - "pet-store, dogs, Spot, Dalmatian", - "pet-store, dogs, Fido, Terrier", - "pet-store, dogs, Remy, Retriever", - "pet-store, cats, Whiskers, Calico", - "pet-store, cats, Twitch, Siamese", - } - if diff := cmp.Diff(generated, expected); diff != "" { - t.Fatalf("Unexpected generated output: (-got +want)\n%s", diff) - } - expected = []string{ - "pet-store, dogs", - "pet-store, cats", - } - if diff := cmp.Diff(finalized, expected); diff != "" { - t.Fatalf("Unexpected finalized output: (-got +want)\n%s", diff) - } -} - -func TestGitHubRepo(t *testing.T) { - SetupForTesting() - repositoryOverride = "" - in := baseProwJobTemplateData{RepoURI: "repoURI"} - - if diff := cmp.Diff(gitHubRepo(in), "repoURI"); diff != "" { - t.Fatalf("Bad output when RepoBranch unset and no override: (-got +want)\n%s", diff) - } - - in = baseProwJobTemplateData{RepoURI: "repoURI", RepoBranch: "repoBranch"} - if diff := cmp.Diff(gitHubRepo(in), "repoURI=repoBranch"); diff != "" { - t.Fatalf("Bad output when RepoBranch set and no override: (-got +want)\n%s", diff) - } - - repositoryOverride = "repoOverride" - if diff := cmp.Diff(gitHubRepo(in), "repoOverride"); diff != "" { - t.Fatalf("Bad output when override set: (-got +want)\n%s", diff) - } -} - -func TestExecuteJobTemplate(t *testing.T) { - SetupForTesting() - name := "foo" - templ := ` -- foo: [[.Foo]] -[[indent_section 2 "bar" .Bar]] -` - title := "my-title" - repoName := "my-repo-name" - jobName := "my-job-name" - groupByRepo := false - data := struct { - Foo string - Bar []string - }{ - Foo: "Foo", - Bar: []string{"Bar", "Baz"}, - } - - jobNameFilter = "xyz" - executeJobTemplate(name, templ, title, repoName, jobName, groupByRepo, data) - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - expected := "" - if diff := cmp.Diff(GetOutput(), expected); diff != "" { - t.Fatalf("Expected job to be filtered: (-got +want)\n%s", diff) - } - - ResetOutput() - jobNameFilter = "my-job-name" - executeJobTemplate(name, templ, title, repoName, jobName, groupByRepo, data) - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - if GetOutput() == "" { - t.Fatalf("Job should not have been filtered") - } - - ResetOutput() - jobNameFilter = "" - sectionMap[title] = false - executeJobTemplate(name, templ, title, repoName, jobName, groupByRepo, data) - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - expected = "my-title:\n- foo: Foo\nbar:\n \"Bar\"\n \"Baz\"\n" - if diff := cmp.Diff(GetOutput(), expected); diff != "" { - t.Fatalf("Bad execute job template output: (-got +want)\n%s", diff) - } - - ResetOutput() - sectionMap[title] = true - executeJobTemplate(name, templ, 
title, repoName, jobName, groupByRepo, data) - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - expected = "- foo: Foo\nbar:\n \"Bar\"\n \"Baz\"\n" - if diff := cmp.Diff(GetOutput(), expected); diff != "" { - t.Fatalf("Bad execute job template output: (-got +want)\n%s", diff) - } - - ResetOutput() - groupByRepo = true - sectionMap[title+repoName] = false - executeJobTemplate(name, templ, title, repoName, jobName, groupByRepo, data) - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - expected = " my-repo-name:\n- foo: Foo\nbar:\n \"Bar\"\n \"Baz\"\n" - if diff := cmp.Diff(GetOutput(), expected); diff != "" { - t.Fatalf("Bad execute job template output: (-got +want)\n%s", diff) - } -} - -func TestExecuteTemplate(t *testing.T) { - SetupForTesting() - name := "foo" - templ := ` -- foo: [[.Foo]] -[[indent_section 2 "bar" .Bar]] -` - data := struct { - Foo string - Bar []string - }{ - Foo: "Foo", - Bar: []string{"Bar", "Baz"}, - } - executeTemplate(name, templ, data) - - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - expected := - "- foo: Foo\nbar:\n \"Bar\"\n \"Baz\"\n" - - if diff := cmp.Diff(GetOutput(), expected); diff != "" { - t.Fatalf("Bad execute template output: (-got +want)\n%s", diff) - } -} -func TestStringArrayFlagString(t *testing.T) { - SetupForTesting() - arr := stringArrayFlag{"a", "b", "c"} - if diff := cmp.Diff(arr.String(), "a, b, c"); diff != "" { - t.Fatalf("(-got +want)\n%s", diff) - } -} -func TestStringArrayFlagSet(t *testing.T) { - SetupForTesting() - arr := stringArrayFlag{"a", "b", "c"} - arr.Set("d") - if diff := cmp.Diff(arr.String(), "a, b, c, d"); diff != "" { - t.Fatalf("(-got +want)\n%s", diff) - } -} - -func TestParseJob(t *testing.T) { - SetupForTesting() - dogs := yaml.MapSlice{ - yaml.MapItem{Key: "Spot", Value: "Dalmatian"}, - yaml.MapItem{Key: "Fido", Value: "Terrier"}, - } - cats := yaml.MapSlice{ - yaml.MapItem{Key: "Fluffy", Value: "Calico"}, - yaml.MapItem{Key: "Maxine", Value: "Siamese"}, - } - pets := yaml.MapSlice{ - yaml.MapItem{Key: "dogs", Value: dogs}, - yaml.MapItem{Key: "cats", Value: cats}, - } - - out := parseJob(pets, "dogs") - expected := "[{Spot Dalmatian} {Fido Terrier}]" - if diff := cmp.Diff(fmt.Sprintf("%v", out), expected); diff != "" { - t.Fatalf("ParseJob did not return expected slice. 
(-got +want)\n%s", diff) - } - - out = parseJob(pets, "hamsters") - if logFatalCalls != 1 { - t.Fatalf("ParseJob did not return error as expected.") - } -} - -func TestParseGoCoverageMap(t *testing.T) { - SetupForTesting() - dogs := []interface{}{ - yaml.MapSlice{ - yaml.MapItem{Key: "Spot", Value: "Dalmatian"}, - yaml.MapItem{Key: "Fido", Value: "Terrier"}, - }, - yaml.MapSlice{ - yaml.MapItem{Key: "go-coverage", Value: true}, - }, - } - cats := []interface{}{ - yaml.MapSlice{ - yaml.MapItem{Key: "Whiskers", Value: "Calico"}, - yaml.MapItem{Key: "Twitch", Value: "Siamese"}, - }, - } - config := yaml.MapSlice{ - yaml.MapItem{Key: "pets/dog-repo", Value: dogs}, - yaml.MapItem{Key: "pets/cat-repo", Value: cats}, - } - - out := parseGoCoverageMap(config) - if out["cat-repo"] { - t.Fatalf("Go coverage should not have been enabled for cat-repo") - } - if !out["dog-repo"] { - t.Fatalf("Go coverage should have been enabled for dog-repo") - } -} - -func TestCollectMetaData(t *testing.T) { - redDetailMap := JobDetailMap{ - "red-repo": []string{"red-a", "red-b"}, - } - - metaData = TestGridMetaData{ - md: map[string]JobDetailMap{ - "red-proj": redDetailMap, - }, - projNames: []string{"red-proj"}, - } - redRepo := []interface{}{ - yaml.MapSlice{ - yaml.MapItem{Key: "continuous", Value: true}, - yaml.MapItem{Key: "dot-release", Value: true}, - yaml.MapItem{Key: "auto-release", Value: false}, - yaml.MapItem{Key: "nightly", Value: false}, - yaml.MapItem{Key: "webhook-apicoverage", Value: false}, - }, - yaml.MapSlice{ - yaml.MapItem{Key: "branch-ci", Value: true}, - }, - } - bluRepo := []interface{}{ - yaml.MapSlice{ - yaml.MapItem{Key: "release", Value: "0.1.2"}, - yaml.MapItem{Key: "custom-job", Value: "custom-job-name"}, - yaml.MapItem{Key: "ignore-me", Value: "ignore-me-too"}, - }, - } - config := yaml.MapSlice{ - yaml.MapItem{Key: "red-proj/red-repo", Value: redRepo}, - yaml.MapItem{Key: "blu-proj/blu-repo", Value: bluRepo}, - } - - collectMetaData(config) - - expected := []string{"red-a", "red-b", "dot-release", "continuous"} - if diff := cmp.Diff(metaData.md["red-proj"]["red-repo"], expected); diff != "" { - t.Fatalf("Unexpected metadata for red proj/repo. (-got +want)\n%s", diff) - } - - expected = []string{"custom-job-name"} - if diff := cmp.Diff(metaData.md["blu-proj-0.1.2"]["blu-repo"], expected); diff != "" { - t.Fatalf("Unexpected metadata for blu proj/repo. (-got +want)\n%s", diff) - } - - expected = []string{"red-proj", "blu-proj", "blu-proj-0.1.2"} - if diff := cmp.Diff(metaData.projNames, expected); diff != "" { - t.Fatalf("Unexpected list of project names. 
(-got +want)\n%s", diff) - } -} - -func TestUpdateTestCoverageJobDataIfNeeded(t *testing.T) { - SetupForTesting() - repoName := "foo-repo" - goCoverageMap = map[string]bool{repoName: true} - jobDetailMap := JobDetailMap{ - "bar-repo": []string{"bar-a", "bar-b"}, - } - updateTestCoverageJobDataIfNeeded(jobDetailMap, repoName) - if len(goCoverageMap) != 0 { - t.Fatalf("foo-repo was not deleted from goCoverageMap") - } - expected := []string{"test-coverage"} - if diff := cmp.Diff(jobDetailMap[repoName], expected); diff != "" { - t.Fatalf("Unexpected entry for repoName in job detail map (-got +want)\n%s", diff) - } -} - -func TestAddRemainingTestCoverageJobs(t *testing.T) { - SetupForTesting() - goCoverageMap = map[string]bool{ - "bar-repo": true, - "baz-repo": false} - jobDetailMap := JobDetailMap{ - "foo-repo": []string{"foo-a", "foo-b"}, - } - metaData = TestGridMetaData{ - md: map[string]JobDetailMap{"proj0": jobDetailMap}, - projNames: []string{"proj0"}, - } - - addRemainingTestCoverageJobs() - - expected := []string{"test-coverage"} - if diff := cmp.Diff(jobDetailMap["bar-repo"], expected); diff != "" { - t.Fatalf("Unexpected entry for bar-repo in job detail map (-got +want)\n%s", diff) - } -} -func TestBuildProjRepoStr(t *testing.T) { - SetupForTesting() - - projName := "project-name" - repoName := "repo-name" - expected := "project-name-repo-name" - actual := buildProjRepoStr(projName, repoName) - if diff := cmp.Diff(actual, expected); diff != "" { - t.Fatalf("Unexpected project repo string: (-got +want)\n%s", diff) - } - - projName = "knative-sandbox-0.15" - repoName = "repo-name" - expected = "knative-sandbox-repo-name-0.15" - actual = buildProjRepoStr(projName, repoName) - if diff := cmp.Diff(actual, expected); diff != "" { - t.Fatalf("Unexpected project repo string: (-got +want)\n%s", diff) - } -} -func TestIsReleased(t *testing.T) { - SetupForTesting() - valid := []string{"abc-0", "def-1.2.3"} - invalid := []string{"-4.5.6", "abc-1.2.3g"} - for _, v := range valid { - if !isReleased(v) { - t.Fatalf("Should be valid: %v", v) - } - } - for _, v := range invalid { - if isReleased(v) { - t.Fatalf("Should be invalid: %v", v) - } - } -} - -func TestSetOutput(t *testing.T) { - SetupForTesting() - setOutput("") - if logFatalCalls != 0 { - t.Fatalf("Fatal log call recorded") - } - // don't test setting an output file since this will create - // a local file system change -} diff --git a/tools/config-generator/perf_config.go b/tools/config-generator/perf_config.go deleted file mode 100644 index 6eee84664ed..00000000000 --- a/tools/config-generator/perf_config.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// data definitions that are used for the config file generation of performance -// tests cluster maintenance jobs. 
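-// The jobs produced here are: "recreate-clusters" and "update-clusters" periodics,
-// plus a "reconcile-clusters" postsubmit, all running performance-tests.sh with the
-// corresponding flag.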
- -package main - -import ( - "fmt" -) - -const ( - perfTestScriptPath = "./test/performance/performance-tests.sh" - perfTestSecretName = "performance-test" -) - -// generatePerfClusterUpdatePeriodicJobs generates periodic jobs to update clusters -// that run performance testing benchmarks -func generatePerfClusterUpdatePeriodicJobs() { - for _, repo := range repositories { - if repo.EnablePerformanceTests { - perfClusterPeriodicJob( - "recreate-clusters", - recreatePerfClusterPeriodicJobCron, - perfTestScriptPath, - []string{"--recreate-clusters"}, - repo, - perfTestSecretName, - ) - perfClusterPeriodicJob( - "update-clusters", - updatePerfClusterPeriodicJobCron, - perfTestScriptPath, - []string{"--update-clusters"}, - repo, - perfTestSecretName, - ) - } - } -} - -// generatePerfClusterPostsubmitJob generates postsubmit job for the -// repo to reconcile clusters that run performance testing benchmarks. -func generatePerfClusterPostsubmitJob(repo repositoryData) { - perfClusterReconcilePostsubmitJob( - "reconcile-clusters", - perfTestScriptPath, - []string{"--reconcile-benchmark-clusters"}, - repo, - perfTestSecretName, - ) -} - -func perfClusterPeriodicJob(jobNamePostFix, cronString, command string, args []string, repo repositoryData, sa string) { - var data periodicJobTemplateData - data.Base = perfClusterBaseProwJob(command, args, repo.Name, sa) - data.Base.ExtraRefs = append(data.Base.ExtraRefs, " base_ref: "+data.Base.RepoBranch) - data.PeriodicJobName = fmt.Sprintf("ci-%s-%s", data.Base.RepoNameForJob, jobNamePostFix) - data.CronString = cronString - data.PeriodicCommand = createCommand(data.Base) - data.Base.Annotations = []string{" testgrid-create-test-group: \"false\""} - addMonitoringPubsubLabelsToJob(&data.Base, data.PeriodicJobName) - executeJobTemplate("performance tests periodic", readTemplate(periodicTestJob), - "periodics", repo.Name, data.PeriodicJobName, false, data) -} - -func perfClusterReconcilePostsubmitJob(jobNamePostFix, command string, args []string, repo repositoryData, sa string) { - var data postsubmitJobTemplateData - data.Base = perfClusterBaseProwJob(command, args, repo.Name, sa) - data.Base.Branches = []string{data.Base.RepoBranch} - data.PostsubmitJobName = fmt.Sprintf("post-%s-%s", data.Base.RepoNameForJob, jobNamePostFix) - data.PostsubmitCommand = createCommand(data.Base) - addMonitoringPubsubLabelsToJob(&data.Base, data.PostsubmitJobName) - executeJobTemplate("performance tests postsubmit", readTemplate(perfPostsubmitJob), - "postsubmits", repo.Name, data.PostsubmitJobName, true, data) -} - -func perfClusterBaseProwJob(command string, args []string, fullRepoName, sa string) baseProwJobTemplateData { - base := newbaseProwJobTemplateData(fullRepoName) - base.Command = command - base.Args = args - addVolumeToJob(&base, "/etc/performance-test", sa, true, nil) - base.addEnvToJob("GOOGLE_APPLICATION_CREDENTIALS", "/etc/performance-test/service-account.json") - base.addEnvToJob("GITHUB_TOKEN", "/etc/performance-test/github-token") - base.addEnvToJob("SLACK_READ_TOKEN", "/etc/performance-test/slack-read-token") - base.addEnvToJob("SLACK_WRITE_TOKEN", "/etc/performance-test/slack-write-token") - return base -} diff --git a/tools/config-generator/perf_config_test.go b/tools/config-generator/perf_config_test.go deleted file mode 100644 index 55ff33efaf2..00000000000 --- a/tools/config-generator/perf_config_test.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not 
use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestGeneratePerfClusterUpdatePeriodicJobs(t *testing.T) { - SetupForTesting() - repositories = []repositoryData{ - { - Name: "enabled-repo", - EnablePerformanceTests: true, - }, - } - generatePerfClusterUpdatePeriodicJobs() - if logFatalCalls != 0 || len(GetOutput()) == 0 { - t.Errorf("Expected job to be written without errors") - } - - SetupForTesting() - repositories = []repositoryData{ - { - Name: "disabled-repo", - EnablePerformanceTests: false, - }, - } - generatePerfClusterUpdatePeriodicJobs() - if len(GetOutput()) != 0 { - t.Errorf("Expected nothing to be written") - } -} - -func TestGeneratePerfClusterPostsubmitJob(t *testing.T) { - SetupForTesting() - generatePerfClusterPostsubmitJob(repositoryData{Name: "my-repo"}) - if logFatalCalls != 0 || len(GetOutput()) == 0 { - t.Errorf("Expected job to be written without errors") - } -} - -func TestPerfClusterPeriodicJob(t *testing.T) { - SetupForTesting() - repoData := repositoryData{Name: "my-repo"} - perfClusterPeriodicJob("postfix", "cronString", "command", []string{"arg1", "arg2"}, repoData, "sa") - - if logFatalCalls != 0 || len(GetOutput()) == 0 { - t.Errorf("Expected job to be written without errors") - } -} - -func TestPerfClusterReconcilePostsubmitJob(t *testing.T) { - SetupForTesting() - repoData := repositoryData{Name: "my-repo"} - perfClusterReconcilePostsubmitJob("postfix", "command", []string{"arg1", "arg2"}, repoData, "sa") - - if logFatalCalls != 0 || len(GetOutput()) == 0 { - t.Errorf("Expected job to be written without errors") - } -} - -func TestPerfClusterBaseProwJob(t *testing.T) { - SetupForTesting() - command := "command" - args := []string{"arg1", "arg2"} - repoName := "org-name/repo-name" - sa := "foo" - res := perfClusterBaseProwJob(command, args, repoName, sa) - - if diff := cmp.Diff(res.Command, command); diff != "" { - t.Errorf("Incorrect command: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(res.Args, args); diff != "" { - t.Errorf("Incorrect args: (-got +want)\n%s", diff) - } - if diff := cmp.Diff(res.Command, command); diff != "" { - t.Errorf("Incorrect command: (-got +want)\n%s", diff) - } - if want, got := 8, len(res.Env); want != got { - t.Errorf("Expected 8 environments, got %d", len(res.Env)) - } -} diff --git a/tools/config-generator/periodic_config.go b/tools/config-generator/periodic_config.go deleted file mode 100644 index 2db0476b12b..00000000000 --- a/tools/config-generator/periodic_config.go +++ /dev/null @@ -1,298 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// data definitions that are used for the config file generation of periodic prow jobs
-
-package main
-
-import (
-	"bytes"
-	"encoding/gob"
-	"fmt"
-	"hash/fnv"
-	"log"
-	"strings"
-
-	"gopkg.in/yaml.v2"
-)
-
-const (
-	// Template for periodic test/release jobs.
-	periodicTestJob = "prow_periodic_test_job.yaml"
-
-	// Template for periodic custom jobs.
-	periodicCustomJob = "prow_periodic_custom_job.yaml"
-
-	// Cron strings for key jobs
-	goCoveragePeriodicJobCron = "0 1 * * *" // Run at 01:00 every day
-	recreatePerfClusterPeriodicJobCron = "30 07 * * *" // Run at 00:30 PST every day (07:30 UTC)
-	updatePerfClusterPeriodicJobCron = "5 * * * *" // Run every hour
-)
-
-// periodicJobTemplateData contains data about a periodic Prow job.
-type periodicJobTemplateData struct {
-	Base baseProwJobTemplateData
-	PeriodicJobName string
-	CronString string
-	PeriodicCommand []string
-}
-
-func (p periodicJobTemplateData) Clone() periodicJobTemplateData {
-	var r periodicJobTemplateData
-	var err error
-	buff := new(bytes.Buffer)
-	enc := gob.NewEncoder(buff)
-	dec := gob.NewDecoder(buff)
-	if err = enc.Encode(&p); err != nil {
-		panic(err)
-	}
-	if err = dec.Decode(&r); err != nil {
-		panic(err)
-	}
-	return r
-}
-
-func getUTCtime(i int) int {
-	r := i + 7
-	if r > 23 {
-		return r - 24
-	}
-	return r
-}
-
-func calculateMinuteOffset(str ...string) int {
-	h := fnv.New32a()
-	for _, s := range str {
-		h.Write([]byte(s))
-	}
-	return int(h.Sum32()) % 60
-}
-
-// generateCron generates a cron string based on the job type. The minute offset is
-// derived from a hash of the job name instead of a random value, to keep the output
-// consistent across runs; the timeout determines how many hours apart the runs are.
-func generateCron(jobType, jobName, repoName string, timeout int) string {
-	minutesOffset := calculateMinuteOffset(jobType, jobName)
-	// Determine the hourly job interval based on the timeout
-	hours := int((timeout+5)/60) + 1 // Allow at least 5 minutes between runs
-	hourCron := fmt.Sprintf("%d * * * *", minutesOffset)
-	if hours > 1 {
-		hourCron = fmt.Sprintf("%d */%d * * *", minutesOffset, hours*3)
-	}
-	daily := func(pacificHour int) string {
-		return fmt.Sprintf("%d %d * * *", minutesOffset, getUTCtime(pacificHour))
-	}
-	weekly := func(pacificHour, dayOfWeek int) string {
-		return fmt.Sprintf("%d %d * * %d", minutesOffset, getUTCtime(pacificHour), dayOfWeek)
-	}
-
-	var res string
-	switch jobType {
-	case "continuous", "custom-job", "auto-release": // As much as every hour
-		res = hourCron
-	case "branch-ci":
-		res = daily(1) // 1 AM
-	case "nightly":
-		res = daily(2) // 2 AM
-	case "dot-release":
-		if strings.HasSuffix(repoName, "-operator") {
-			// Every Tuesday noon
-			res = weekly(12, 2)
-		} else {
-			// Every Tuesday 2 AM
-			res = weekly(2, 2)
-		}
-	default:
-		log.Printf("job type not supported for cron generation '%s'", jobName)
-	}
-	return res
-}
-
-// generatePeriodic generates periodic job configs for the given repo and configuration.
-// Normally it generates one job per call -// But if it is continuous or branch-ci job, it generates a second job for beta testing of new prow-tests images -func generatePeriodic(title string, repoName string, periodicConfig yaml.MapSlice) { - var data periodicJobTemplateData - data.Base = newbaseProwJobTemplateData(repoName) - jobNameSuffix := "" - jobTemplate := readTemplate(periodicTestJob) - jobType := "" - org := data.Base.OrgName - repo := data.Base.RepoName - dashboardName := repo - tabName := "" - // Parse the input yaml and set values data based on them - for i, item := range periodicConfig { - jobName := getString(item.Key) - switch jobName { - case "continuous": - if !getBool(item.Value) { - return - } - jobType = getString(item.Key) - jobNameSuffix = "continuous" - tabName = jobNameSuffix - // Use default command and arguments if none given. - if data.Base.Command == "" { - data.Base.Command = presubmitScript - } - if len(data.Base.Args) == 0 { - data.Base.Args = allPresubmitTests - } - data.Base.Timeout = 180 - case "nightly": - if !getBool(item.Value) { - return - } - jobType = getString(item.Key) - jobNameSuffix = "nightly-release" - tabName = jobNameSuffix - data.Base.ServiceAccount = nightlyAccount - data.Base.Command = releaseScript - data.Base.Args = releaseNightly - data.Base.Timeout = 180 - case "branch-ci": - if !getBool(item.Value) { - return - } - jobType = getString(item.Key) - jobNameSuffix = "continuous" - tabName = jobNameSuffix - data.Base.Command = releaseScript - data.Base.Args = releaseLocal - setupDockerInDockerForJob(&data.Base) - data.Base.Timeout = 180 - case "dot-release", "auto-release": - if !getBool(item.Value) { - return - } - jobType = getString(item.Key) - jobNameSuffix = getString(item.Key) - tabName = jobNameSuffix - data.Base.ServiceAccount = releaseAccount - data.Base.Command = releaseScript - data.Base.Args = []string{ - "--" + jobNameSuffix, - "--release-gcs", data.Base.ReleaseGcs, - "--release-gcr", "gcr.io/knative-releases", - "--github-token", "/etc/hub-token/token", - } - addVolumeToJob(&data.Base, "/etc/hub-token", "hub-token", true, nil) - // For dot-release and auto-release jobs, set ORG_NAME env var if the org - // name is not knative, as it's needed by release.sh - if data.Base.OrgName != "knative" { - data.Base.addEnvToJob("ORG_NAME", data.Base.OrgName) - } - data.Base.Timeout = 180 - case "custom-job": - jobType = getString(item.Key) - jobNameSuffix = getString(item.Value) - tabName = jobNameSuffix - data.Base.Timeout = 120 - case "cron": - data.CronString = getString(item.Value) - case "release": - version := getString(item.Value) - dashboardName = org + "-" + version - tabName = repo + "-" + jobNameSuffix - jobNameSuffix = version + "-" + jobNameSuffix - data.Base.RepoBranch = "release-" + version - if jobType == "dot-release" { - data.Base.Args = append(data.Base.Args, "--branch", "release-"+version) - } - default: - continue - } - // Knock-out the item, signalling it was already parsed. 
- periodicConfig[i] = yaml.MapItem{} - testgroupExtras := getTestgroupExtras(org, jobName) - data.Base.Annotations = generateProwJobAnnotations(dashboardName, tabName, testgroupExtras) - } - parseBasicJobConfigOverrides(&data.Base, periodicConfig) - data.PeriodicJobName = fmt.Sprintf("ci-%s", data.Base.RepoNameForJob) - if jobNameSuffix != "" { - data.PeriodicJobName += "-" + jobNameSuffix - } - if data.CronString == "" { - data.CronString = generateCron(jobType, data.PeriodicJobName, data.Base.RepoName, data.Base.Timeout) - } - // Ensure required data exist. - if data.CronString == "" { - logFatalf("Job %q is missing cron string", data.PeriodicJobName) - } - if len(data.Base.Args) == 0 && data.Base.Command == "" { - logFatalf("Job %q is missing command", data.PeriodicJobName) - } - if jobType == "branch-ci" && data.Base.RepoBranch == "" { - logFatalf("%q jobs are intended to be used on release branches", jobType) - } - - // Generate config itself. - data.PeriodicCommand = createCommand(data.Base) - if data.Base.ServiceAccount != "" { - data.Base.addEnvToJob("GOOGLE_APPLICATION_CREDENTIALS", data.Base.ServiceAccount) - data.Base.addEnvToJob("E2E_CLUSTER_REGION", "us-central1") - } - if data.Base.RepoBranch != "" && data.Base.RepoBranch != "main" { - // If it's a release version, add env var PULL_BASE_REF as ref name of the base branch. - // The reason for having it is in https://github.com/knative/test-infra/issues/780. - data.Base.addEnvToJob("PULL_BASE_REF", data.Base.RepoBranch) - } - addExtraEnvVarsToJob(extraEnvVars, &data.Base) - configureServiceAccountForJob(&data.Base) - data.Base.DecorationConfig = []string{fmt.Sprintf("timeout: %dm", data.Base.Timeout)} - - // This is where the data actually gets written out - executeJobTemplate("periodic", jobTemplate, title, repoName, data.PeriodicJobName, false, data) -} - -// generateGoCoveragePeriodic generates the go coverage periodic job config for the given repo (configuration is ignored). 
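generatePeriodic above relies on two idioms that are easy to miss: the job config is decoded into a yaml.MapSlice rather than a map, so key order is preserved, and each recognized key is "knocked out" by overwriting it with an empty yaml.MapItem so that the later parseBasicJobConfigOverrides pass only sees the leftovers. A self-contained sketch of that two-pass pattern, using made-up keys:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	raw := `continuous: true
cron: "0 1 * * *"
timeout: 90
`
	var cfg yaml.MapSlice
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	// First pass: handle known keys, then knock them out so they are
	// invisible to the generic overrides pass.
	for i, item := range cfg {
		switch item.Key {
		case "continuous", "cron":
			fmt.Printf("first pass handled %v=%v\n", item.Key, item.Value)
			cfg[i] = yaml.MapItem{} // knock-out: mark as already parsed
		}
	}
	// Second pass: whatever still has a key is treated as an override.
	for _, item := range cfg {
		if item.Key != nil {
			fmt.Printf("left for overrides pass: %v=%v\n", item.Key, item.Value)
		}
	}
}
```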
-func generateGoCoveragePeriodic(title string, repoName string, _ yaml.MapSlice) { - var repo *repositoryData - // Find a repository entry where repo name matches and Go Coverage is enabled - for i, repoI := range repositories { - if repoName != repoI.Name || !repoI.EnableGoCoverage { - continue - } - repo = &repositories[i] - break - } - if repo != nil && repo.EnableGoCoverage { - repo.Processed = true - var data periodicJobTemplateData - data.Base = newbaseProwJobTemplateData(repoName) - jobNameSuffix := "go-coverage" - data.PeriodicJobName = fmt.Sprintf("ci-%s-%s", data.Base.RepoNameForJob, jobNameSuffix) - data.CronString = goCoveragePeriodicJobCron - data.Base.GoCoverageThreshold = repo.GoCoverageThreshold - data.Base.Command = "runner.sh" - data.Base.Args = []string{ - "coverage", - "--artifacts=$(ARTIFACTS)", - fmt.Sprintf("--cov-threshold-percentage=%d", data.Base.GoCoverageThreshold)} - data.Base.ServiceAccount = "" - data.Base.ExtraRefs = append(data.Base.ExtraRefs, " base_ref: "+data.Base.RepoBranch) - - addExtraEnvVarsToJob(extraEnvVars, &data.Base) - addMonitoringPubsubLabelsToJob(&data.Base, data.PeriodicJobName) - configureServiceAccountForJob(&data.Base) - dashboardName := data.Base.RepoName - tabName := data.Base.RepoName + "-" + jobNameSuffix - testgroupExtras := map[string]string{"short-text-metric": "coverage"} - data.Base.Annotations = generateProwJobAnnotations(dashboardName, tabName, testgroupExtras) - executeJobTemplate("periodic go coverage", readTemplate(periodicCustomJob), title, repoName, data.PeriodicJobName, false, data) - } -} diff --git a/tools/config-generator/periodic_config_test.go b/tools/config-generator/periodic_config_test.go deleted file mode 100644 index 132758058d8..00000000000 --- a/tools/config-generator/periodic_config_test.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "gopkg.in/yaml.v2" - "knative.dev/test-infra/tools/config-generator/unstructured" -) - -func TestClone(t *testing.T) { - SetupForTesting() - base := baseProwJobTemplateData{OrgName: "org-name"} - data := periodicJobTemplateData{ - Base: base, - PeriodicJobName: "periodic-job-name", - CronString: "cron-string", - PeriodicCommand: []string{"string-a", "string-b"}, - } - if diff := cmp.Diff(data.Clone(), data); diff != "" { - t.Fatalf("Incorrect output for empty string: (-got +want)\n%s", diff) - } -} - -func TestGetUTCtime(t *testing.T) { - SetupForTesting() - for i := 0; i < 24; i++ { - utcTime := getUTCtime(i) - expected := (i + 7) % 24 - if utcTime != expected { - t.Fatalf("Expected %d, got %d", expected, utcTime) - } - } -} - -func TestCalculateMinuteOffset(t *testing.T) { - SetupForTesting() - out1 := calculateMinuteOffset("foo") - out2 := calculateMinuteOffset("foo") - if diff := cmp.Diff(out1, out2); diff != "" { - t.Fatalf("Same input should always yield same offset") - } -} - -func TestGenerateCron(t *testing.T) { - SetupForTesting() - jobName := "job-name" - tests := []struct { - jobType string - repoName string - timeout int - expected string - }{ - { - jobType: "not-supported", - expected: "", - }, - { - jobType: "continuous", - timeout: 54, - expected: fmt.Sprintf("%d * * * *", calculateMinuteOffset("continuous", jobName)), - }, - { - jobType: "continuous", - timeout: 55, - expected: fmt.Sprintf("%d */6 * * *", calculateMinuteOffset("continuous", jobName)), - }, - { - jobType: "continuous", - timeout: 60 + 55, - expected: fmt.Sprintf("%d */9 * * *", calculateMinuteOffset("continuous", jobName)), - }, - { - jobType: "custom-job", - timeout: 54, - expected: fmt.Sprintf("%d * * * *", calculateMinuteOffset("custom-job", jobName)), - }, - { - jobType: "auto-release", - timeout: 54, - expected: fmt.Sprintf("%d * * * *", calculateMinuteOffset("auto-release", jobName)), - }, - { - jobType: "branch-ci", - expected: fmt.Sprintf("%d 8 * * *", calculateMinuteOffset("branch-ci", jobName)), - }, - { - jobType: "nightly", - expected: fmt.Sprintf("%d 9 * * *", calculateMinuteOffset("nightly", jobName)), - }, - { - jobType: "dot-release", - repoName: "foo", - expected: fmt.Sprintf("%d 9 * * 2", calculateMinuteOffset("dot-release", jobName)), - }, - { - jobType: "dot-release", - repoName: "foo-operator", - expected: fmt.Sprintf("%d 19 * * 2", calculateMinuteOffset("dot-release", jobName)), - }, - } - for _, tc := range tests { - out := generateCron(tc.jobType, jobName, tc.repoName, tc.timeout) - if diff := cmp.Diff(out, tc.expected); diff != "" { - t.Fatalf("For jobType %v and timeout %d: (-got +want)\n%s", tc.jobType, tc.timeout, diff) - } - } -} - -type release struct { - version string -} - -type periodicJob struct { - jobType string - *release -} - -func TestGeneratePeriodic(t *testing.T) { - title := "title" - repoName := "repoName" - tests := []struct { - job periodicJob - assertions []unstructured.Assertion - }{ - {job: periodicJob{jobType: "continuous"}}, - {job: periodicJob{jobType: "nightly"}, assertions: []unstructured.Assertion{hasProperArgs(title, []string{ - "./hack/release.sh", - "--publish", "--tag-release", - })}}, - {job: periodicJob{jobType: "branch-ci"}}, - {job: periodicJob{jobType: "dot-release"}, assertions: []unstructured.Assertion{hasProperArgs(title, []string{ - "./hack/release.sh", - "--dot-release", "--release-gcs", repoName, - "--release-gcr", "gcr.io/knative-releases", - 
"--github-token", "/etc/hub-token/token", - })}}, - {job: periodicJob{jobType: "auto-release"}, assertions: []unstructured.Assertion{hasProperArgs(title, []string{ - "./hack/release.sh", - "--auto-release", "--release-gcs", repoName, - "--release-gcr", "gcr.io/knative-releases", - "--github-token", "/etc/hub-token/token", - })}}, - { - job: periodicJob{jobType: "dot-release", release: &release{version: "0.23"}}, - assertions: []unstructured.Assertion{hasProperArgs(title, []string{ - "./hack/release.sh", - "--dot-release", "--release-gcs", repoName, - "--release-gcr", "gcr.io/knative-releases", - "--github-token", "/etc/hub-token/token", - "--branch", "release-0.23", - })}, - }, - } - oldReleaseScript := releaseScript - defer func() { - releaseScript = oldReleaseScript - }() - for i, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%d-%s", i, tc.job.jobType), func(t *testing.T) { - testGeneratePeriodicEach(t, title, repoName, tc.job, tc.assertions) - }) - } -} -func testGeneratePeriodicEach( - tb testing.TB, - title, repoName string, - job periodicJob, - assertions []unstructured.Assertion, -) { - var periodicConfig yaml.MapSlice - SetupForTesting() - releaseScript = "./hack/release.sh" - periodicConfig = yaml.MapSlice{{Key: job.jobType, Value: true}} - if job.release != nil { - periodicConfig = append(periodicConfig, - yaml.MapItem{Key: "release", Value: job.version}) - } - generatePeriodic(title, repoName, periodicConfig) - out := GetOutput() - outputLen := len(out) - if outputLen == 0 { - tb.Fatal("No output") - } - if logFatalCalls != 0 { - tb.Fatal("LogFatal was called") - } - un := make(map[interface{}]interface{}) - err := yaml.Unmarshal([]byte(out), &un) - if err != nil { - tb.Fatal(err) - } - for _, assertion := range assertions { - err = assertion(un) - if err != nil { - tb.Fatal(err) - } - } -} - -func TestGenerateGoCoveragePeriodic(t *testing.T) { - SetupForTesting() - repositories = []repositoryData{ - { - Name: "repo-name", - EnableGoCoverage: true, - GoCoverageThreshold: 80, - }, - } - generateGoCoveragePeriodic("title", "repo-name", nil) - if len(GetOutput()) == 0 { - t.Fatalf("No output") - } - if logFatalCalls != 0 { - t.Fatalf("LogFatal was called.") - } -} - -func hasProperArgs(title string, want []string) unstructured.Assertion { - assert := unstructured.EqualsStringSlice(want) - query := fmt.Sprintf("%s.0.spec.containers.0.args", title) - return queryAndAssert(query, assert) -} - -func queryAndAssert(query string, assert unstructured.Assertion) unstructured.Assertion { - return func(un interface{}) error { - questioner := unstructured.NewQuestioner(un) - val, err := questioner.Query(query) - if err != nil { - return err - } - return assert(val) - } -} diff --git a/tools/config-generator/postsubmit_config.go b/tools/config-generator/postsubmit_config.go deleted file mode 100644 index 05fa456640b..00000000000 --- a/tools/config-generator/postsubmit_config.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "fmt" - "strings" - - "gopkg.in/yaml.v2" -) - -const ( - // goCoveragePostsubmitJob is the template for the go postsubmit coverage job. - goCoveragePostsubmitJob = "prow_postsubmit_gocoverage_job.yaml" - - // perfPostsubmitJob is the template for the performance operations - // postsubmit job. - perfPostsubmitJob = "prow_postsubmit_perf_job.yaml" -) - -// postsubmitJobTemplateData contains data about a postsubmit Prow job. -type postsubmitJobTemplateData struct { - Base baseProwJobTemplateData - PostsubmitJobName string - PostsubmitCommand []string -} - -// generateGoCoveragePostsubmit generates the go coverage postsubmit job config for the given repo. -func generateGoCoveragePostsubmit(title, repoName string, _ yaml.MapSlice) { - var data postsubmitJobTemplateData - data.Base = newbaseProwJobTemplateData(repoName) - data.Base.Branches = []string{data.Base.RepoBranch} - data.PostsubmitJobName = fmt.Sprintf("post-%s-go-coverage", data.Base.RepoNameForJob) - addExtraEnvVarsToJob(extraEnvVars, &data.Base) - configureServiceAccountForJob(&data.Base) - jobName := data.PostsubmitJobName - executeJobTemplate("postsubmit go coverage", readTemplate(goCoveragePostsubmitJob), title, repoName, jobName, true, data) - // Generate config for post-knative-serving-go-coverage-dev right after post-knative-serving-go-coverage, - // this job is mainly for debugging purpose. - if data.PostsubmitJobName == "post-knative-serving-go-coverage" { - data.PostsubmitJobName += "-dev" - data.Base.Image = strings.ReplaceAll(data.Base.Image, ":stable", ":coverage-dev") - executeJobTemplate("postsubmit go coverage", readTemplate(goCoveragePostsubmitJob), title, repoName, data.PostsubmitJobName, false, data) - } -} diff --git a/tools/config-generator/postsubmit_config_test.go b/tools/config-generator/postsubmit_config_test.go deleted file mode 100644 index 0fab51d2920..00000000000 --- a/tools/config-generator/postsubmit_config_test.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "testing" -) - -func TestGenerateGoCoveragePostsubmit(t *testing.T) { - SetupForTesting() - generateGoCoveragePostsubmit("title", "knative-serving", nil) - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} diff --git a/tools/config-generator/presubmit_config.go b/tools/config-generator/presubmit_config.go deleted file mode 100644 index 8b354148251..00000000000 --- a/tools/config-generator/presubmit_config.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "strings" - - "gopkg.in/yaml.v2" -) - -const ( - // presubmitJob is the template for presubmit jobs. - presubmitJob = "prow_presubmit_job.yaml" - - // presubmitGoCoverageJob is the template for go coverage presubmit jobs. - presubmitGoCoverageJob = "prow_presubmit_gocoverage_job.yaml" -) - -// presubmitJobTemplateData contains data about a presubmit Prow job. -type presubmitJobTemplateData struct { - Base baseProwJobTemplateData - PresubmitJobName string - PresubmitPullJobName string - PresubmitPostJobName string - PresubmitCommand []string - RunIfChanged string -} - -// generatePresubmit generates all presubmit job configs for the given repo and configuration. -// While this function is designed to only make one "logical" presubmit, it does generate multiple separate jobs when different branches need different settings -// i.e. it creates all jobs pull-knative-serving-build-tests per single invocation -// For coverage jobs, it also generates a matching postsubmit for each presubmit (because the coverage tool itself requires it? because we like them?) -// It outputs straight to standard out -func generatePresubmit(title string, repoName string, presubmitConfig yaml.MapSlice) { - var data presubmitJobTemplateData - data.Base = newbaseProwJobTemplateData(repoName) - data.Base.Command = presubmitScript - data.Base.GoCoverageThreshold = 50 - jobTemplate := readTemplate(presubmitJob) - repoData := repositoryData{Name: repoName, EnableGoCoverage: false, GoCoverageThreshold: data.Base.GoCoverageThreshold} - generateJob := true - for i, item := range presubmitConfig { - switch item.Key { - case "build-tests", "unit-tests", "integration-tests": - if !getBool(item.Value) { - return - } - jobName := getString(item.Key) - data.PresubmitJobName = data.Base.RepoNameForJob + "-" + jobName - // Use default arguments if none given. - if len(data.Base.Args) == 0 { - data.Base.Args = []string{"--" + jobName} - } - addVolumeToJob(&data.Base, "/etc/repoview-token", "repoview-token", true, nil) - case "go-coverage": - if !getBool(item.Value) { - return - } - jobTemplate = readTemplate(presubmitGoCoverageJob) - data.PresubmitJobName = data.Base.RepoNameForJob + "-go-coverage" - data.Base.ServiceAccount = "" - repoData.EnableGoCoverage = true - addVolumeToJob(&data.Base, "/etc/covbot-token", "covbot-token", true, nil) - case "custom-test": - data.PresubmitJobName = data.Base.RepoNameForJob + "-" + getString(item.Value) - case "go-coverage-threshold": - data.Base.GoCoverageThreshold = getInt(item.Value) - repoData.GoCoverageThreshold = data.Base.GoCoverageThreshold - case "repo-settings": - generateJob = false - case "run-if-changed": - data.RunIfChanged = "run_if_changed: \"" + getString(item.Value) + "\"" - default: - continue - } - // Knock-out the item, signalling it was already parsed. 
- presubmitConfig[i] = yaml.MapItem{} - } - repositories = append(repositories, repoData) - parseBasicJobConfigOverrides(&data.Base, presubmitConfig) - if !generateJob { - return - } - data.PresubmitCommand = createCommand(data.Base) - data.PresubmitPullJobName = "pull-" + data.PresubmitJobName - data.PresubmitPostJobName = "post-" + data.PresubmitJobName - if data.Base.ServiceAccount != "" { - data.Base.addEnvToJob("GOOGLE_APPLICATION_CREDENTIALS", data.Base.ServiceAccount) - data.Base.addEnvToJob("E2E_CLUSTER_REGION", "us-central1") - } - if data.Base.NeedsMonitor { - addMonitoringPubsubLabelsToJob(&data.Base, data.PresubmitPullJobName) - } - addExtraEnvVarsToJob(extraEnvVars, &data.Base) - configureServiceAccountForJob(&data.Base) - jobName := data.PresubmitPullJobName - - // This is where the data actually gets written out - executeJobTemplate("presubmit", jobTemplate, title, repoName, jobName, true, data) - - // Generate config for pull-knative-serving-go-coverage-dev right after pull-knative-serving-go-coverage, - // this job is mainly for debugging purpose. - if data.PresubmitPullJobName == "pull-knative-serving-go-coverage" { - data.PresubmitPullJobName += "-dev" - data.Base.AlwaysRun = false - data.Base.Image = strings.ReplaceAll(data.Base.Image, ":stable", ":coverage-dev") - template := strings.Replace(readTemplate(presubmitGoCoverageJob), "(all|", "(", 1) - executeJobTemplate("presubmit", template, title, repoName, data.PresubmitPullJobName, true, data) - } -} diff --git a/tools/config-generator/presubmit_config_test.go b/tools/config-generator/presubmit_config_test.go deleted file mode 100644 index 0e3423d477a..00000000000 --- a/tools/config-generator/presubmit_config_test.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
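generatePresubmit above feeds the presubmit templates that appear later in this diff, which emit triggers of the form (?m)^/test (all|&lt;job-name&gt;),?(\s+|$) and strip the all| alternative for the optional -dev coverage job, so /test all does not sweep it in. A quick runnable check of what such a trigger accepts, using a made-up job name:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical job name; the shape of the trigger matches the
	// prow_presubmit_job.yaml template later in this diff.
	trigger := regexp.MustCompile(`(?m)^/test (all|pull-example-repo-unit-tests),?(\s+|$)`)
	for _, comment := range []string{
		"/test all",                                     // true
		"/test pull-example-repo-unit-tests",            // true
		"looks good\n/test pull-example-repo-unit-tests", // true: (?m) matches any line
		"/test pull-some-other-job",                     // false
	} {
		fmt.Printf("%q -> %v\n", comment, trigger.MatchString(comment))
	}
}
```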
-*/ - -package main - -import ( - "testing" - - "gopkg.in/yaml.v2" -) - -func TestGeneratePresubmit(t *testing.T) { - SetupForTesting() - title := "title" - repoName := "repoName" - items := []yaml.MapItem{ - {Key: "build-tests", Value: true}, - {Key: "unit-tests", Value: true}, - {Key: "integration-tests", Value: true}, - {Key: "go-coverage", Value: true}, - {Key: "custom-test", Value: "foo"}, - {Key: "go-coverage-threshold", Value: 80}, - {Key: "run-if-changed", Value: "foo"}, - } - var presubmitConfig yaml.MapSlice - for _, item := range items { - presubmitConfig = yaml.MapSlice{item} - generatePresubmit(title, repoName, presubmitConfig) - outputLen := len(GetOutput()) - if outputLen == 0 { - t.Errorf("Failure for key %s: No output", item.Key) - } - if logFatalCalls != 0 { - t.Errorf("Failure for key %s: LogFatal was called.", item.Key) - } - SetupForTesting() - } -} diff --git a/tools/config-generator/templates/common_header.yaml b/tools/config-generator/templates/common_header.yaml deleted file mode 100644 index f8f74c2d0c3..00000000000 --- a/tools/config-generator/templates/common_header.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ####################################################################### -# #### #### -# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. #### -# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. 
#### -# #### #### -# ####################################################################### - diff --git a/tools/config-generator/templates/k8s_testgrid.yaml b/tools/config-generator/templates/k8s_testgrid.yaml deleted file mode 100644 index b86d7474aa9..00000000000 --- a/tools/config-generator/templates/k8s_testgrid.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Dashboards need to be specified here to be created on TestGrid -# A prow annotation will be invalid if it references a dashboard that doesn't exist -[[indent_array_section 0 "dashboards" .AllRepos]] -dashboard_groups: diff --git a/tools/config-generator/templates/k8s_testgrid_testgroup.yaml b/tools/config-generator/templates/k8s_testgrid_testgroup.yaml deleted file mode 100644 index 6c57b6a9fb8..00000000000 --- a/tools/config-generator/templates/k8s_testgrid_testgroup.yaml +++ /dev/null @@ -1,2 +0,0 @@ - - name: [[.Org]] - [[indent_array_section 6 "dashboard_names" .Repos]] diff --git a/tools/config-generator/templates/prow_periodic_custom_job.yaml b/tools/config-generator/templates/prow_periodic_custom_job.yaml deleted file mode 100644 index 3c79b3bfdf5..00000000000 --- a/tools/config-generator/templates/prow_periodic_custom_job.yaml +++ /dev/null @@ -1,22 +0,0 @@ -- cron: "[[.CronString]]" - name: [[.PeriodicJobName]] - [[indent_section 6 "labels" .Base.Labels]] - agent: kubernetes - decorate: true - [[.Base.Cluster]] - [[indent_section 4 "decoration_config" .Base.DecorationConfig]] - [[indent_array_section 4 "branches" .Base.Branches]] - [[indent_array_section 4 "skip_branches" .Base.SkipBranches]] - [[indent_section 2 "extra_refs" .Base.ExtraRefs]] - [[indent_section 2 "annotations" .Base.Annotations]] - spec: - containers: - - image: [[.Base.Image]] - imagePullPolicy: Always - command: - - "[[.Base.Command]]" - [[indent_array_section 6 "args" .Base.Args]] - [[indent_section 6 "volumeMounts" .Base.VolumeMounts]] - [[indent_section 6 "env" .Base.Env]] - [[indent_section 6 "resources" .Base.Resources]] - [[indent_section 4 "volumes" .Base.Volumes]] diff --git a/tools/config-generator/templates/prow_periodic_test_job.yaml b/tools/config-generator/templates/prow_periodic_test_job.yaml deleted file mode 100644 index a8f4b0266d4..00000000000 --- a/tools/config-generator/templates/prow_periodic_test_job.yaml +++ /dev/null @@ -1,26 +0,0 @@ -- cron: "[[.CronString]]" - name: [[.PeriodicJobName]] - agent: kubernetes - [[indent_section 4 "labels" .Base.Labels]] - decorate: true - [[indent_section 2 "reporter_config" .Base.ReporterConfig]] - [[indent_array_section 6 "job_states_to_report" .Base.JobStatesToReport]] - [[indent_section 4 "decoration_config" .Base.DecorationConfig]] - [[.Base.Cluster]] - [[indent_section 2 "extra_refs" .Base.ExtraRefs]] - [[indent_array_section 4 "branches" .Base.Branches]] - [[indent_array_section 4 "skip_branches" .Base.SkipBranches]] - [[indent_section 2 "annotations" .Base.Annotations]] - spec: - containers: - - image: [[.Base.Image]] - imagePullPolicy: Always - command: - - runner.sh - args: - [[indent_array 6 .PeriodicCommand]] - [[indent_section 8 "securityContext" .Base.SecurityContext]] - [[indent_section 6 "volumeMounts" .Base.VolumeMounts]] - [[indent_section 6 "env" .Base.Env]] - [[indent_section 6 "resources" .Base.Resources]] - [[indent_section 4 "volumes" .Base.Volumes]] diff --git a/tools/config-generator/templates/prow_postsubmit_gocoverage_job.yaml b/tools/config-generator/templates/prow_postsubmit_gocoverage_job.yaml deleted file mode 100644 index b81c4028ae3..00000000000 --- 
a/tools/config-generator/templates/prow_postsubmit_gocoverage_job.yaml +++ /dev/null @@ -1,21 +0,0 @@ - - name: [[.PostsubmitJobName]] - [[indent_array_section 4 "branches" .Base.Branches]] - annotations: - testgrid-create-test-group: "false" - agent: kubernetes - decorate: true - [[.Base.Cluster]] - [[indent_section 8 "labels" .Base.Labels]] - [[.Base.PathAlias]] - spec: - containers: - - image: [[.Base.Image]] - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=0" - [[indent_section 8 "resources" .Base.Resources]] - [[indent_section 8 "env" .Base.Env]] diff --git a/tools/config-generator/templates/prow_postsubmit_perf_job.yaml b/tools/config-generator/templates/prow_postsubmit_perf_job.yaml deleted file mode 100644 index 47888f17e3c..00000000000 --- a/tools/config-generator/templates/prow_postsubmit_perf_job.yaml +++ /dev/null @@ -1,22 +0,0 @@ - - name: [[.PostsubmitJobName]] - [[indent_array_section 4 "branches" .Base.Branches]] - annotations: - testgrid-create-test-group: "false" - agent: kubernetes - decorate: true - max_concurrency: 1 - [[.Base.Cluster]] - [[indent_section 6 "labels" .Base.Labels]] - [[.Base.PathAlias]] - spec: - containers: - - image: [[.Base.Image]] - imagePullPolicy: Always - command: - - runner.sh - args: - [[indent_array 8 .PostsubmitCommand]] - [[indent_section 8 "volumeMounts" .Base.VolumeMounts]] - [[indent_section 8 "env" .Base.Env]] - [[indent_section 8 "resources" .Base.Resources]] - [[indent_section 6 "volumes" .Base.Volumes]] diff --git a/tools/config-generator/templates/prow_presubmit_gocoverage_job.yaml b/tools/config-generator/templates/prow_presubmit_gocoverage_job.yaml deleted file mode 100644 index 28d42ff1960..00000000000 --- a/tools/config-generator/templates/prow_presubmit_gocoverage_job.yaml +++ /dev/null @@ -1,29 +0,0 @@ - - name: [[.PresubmitPullJobName]] - agent: kubernetes - [[indent_section 6 "labels" .Base.Labels]] - context: [[.PresubmitPullJobName]] - always_run: [[.Base.AlwaysRun]] - rerun_command: "/test [[.PresubmitPullJobName]]" - trigger: "(?m)^/test (all|[[.PresubmitPullJobName]]),?(\\s+|$)" - optional: true - decorate: true - [[.Base.PathAlias]] - [[.Base.Cluster]] - [[indent_array_section 4 "branches" .Base.Branches]] - [[indent_array_section 4 "skip_branches" .Base.SkipBranches]] - spec: - containers: - - image: [[.Base.Image]] - imagePullPolicy: Always - command: - - runner.sh - args: - - "coverage" - - "--postsubmit-job-name=[[.PresubmitPostJobName]]" - - "--artifacts=$(ARTIFACTS)" - - "--cov-threshold-percentage=[[.Base.GoCoverageThreshold]]" - - "--github-token=/etc/covbot-token/token" - [[indent_section 8 "volumeMounts" .Base.VolumeMounts]] - [[indent_section 8 "env" .Base.Env]] - [[indent_section 8 "resources" .Base.Resources]] - [[indent_section 6 "volumes" .Base.Volumes]] diff --git a/tools/config-generator/templates/prow_presubmit_job.yaml b/tools/config-generator/templates/prow_presubmit_job.yaml deleted file mode 100644 index 1822d4b0dc4..00000000000 --- a/tools/config-generator/templates/prow_presubmit_job.yaml +++ /dev/null @@ -1,27 +0,0 @@ - - name: [[.PresubmitPullJobName]] - agent: kubernetes - [[indent_section 6 "labels" .Base.Labels]] - context: [[.PresubmitPullJobName]] - always_run: [[.Base.AlwaysRun]] - optional: [[.Base.Optional]] - [[.RunIfChanged]] - rerun_command: "/test [[.PresubmitPullJobName]]" - trigger: "(?m)^/test (all|[[.PresubmitPullJobName]]),?(\\s+|$)" - decorate: true - [[.Base.PathAlias]] - [[.Base.Cluster]] 
- [[indent_array_section 4 "branches" .Base.Branches]] - [[indent_array_section 4 "skip_branches" .Base.SkipBranches]] - spec: - containers: - - image: [[.Base.Image]] - imagePullPolicy: Always - command: - - runner.sh - args: - [[indent_array 8 .PresubmitCommand]] - [[indent_section 10 "securityContext" .Base.SecurityContext]] - [[indent_section 8 "volumeMounts" .Base.VolumeMounts]] - [[indent_section 8 "env" .Base.Env]] - [[indent_section 8 "resources" .Base.Resources]] - [[indent_section 6 "volumes" .Base.Volumes]] diff --git a/tools/config-generator/templates/testgrid_config_header.yaml b/tools/config-generator/templates/testgrid_config_header.yaml deleted file mode 100644 index c006a9cd33b..00000000000 --- a/tools/config-generator/templates/testgrid_config_header.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Default testgroup and dashboardtab, please do not change them -default_test_group: - days_of_results: 14 # Number of days of test results to gather and serve - tests_name_policy: 2 # Replace the name of the test - ignore_pending: false # Show in-progress tests - column_header: - - configuration_value: Commit # Shows the commit number on column header - - configuration_value: infra-commit - num_columns_recent: 10 # The number of columns to consider "recent" for a variety of purposes - use_kubernetes_client: true # ** This field is deprecated and should always be true ** - is_external: true # ** This field is deprecated and should always be true ** - alert_stale_results_hours: 26 # Alert if tests haven't run for a day (1 day + 2h) - num_passes_to_disable_alert: 1 # Consider a failing test passing if it has 1 or more consecutive passes - -default_dashboard_tab: - open_test_template: # The URL template to visit after clicking on a cell - url: [[.ProwHost]]/view/gcs// - file_bug_template: # The URL template to visit when filing a bug - url: https://github.com/knative/serving/issues/new - options: - - key: title - value: "Test \"\" failed" - - key: body - value: - attach_bug_template: # The URL template to visit when attaching a bug - url: # Empty - options: # Empty - # Text to show in the about menu as a link to another view of the results - results_text: See these results on Prow - results_url_template: # The URL template to visit after clicking - url: [[.ProwHost]]/job-history/ - # URL for regression search links. - code_search_path: github.com/knative/serving/search - num_columns_recent: 10 - code_search_url_template: # The URL template to visit when searching for changelists - url: https://github.com/knative/serving/compare/... 
- num_failures_to_alert: 0 - num_passes_to_disable_alert: 1 diff --git a/tools/config-generator/templates/testgrid_dashboardgroup.yaml b/tools/config-generator/templates/testgrid_dashboardgroup.yaml deleted file mode 100644 index 33a1d809b74..00000000000 --- a/tools/config-generator/templates/testgrid_dashboardgroup.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- name: [[.Name]] - dashboard_names: - [[indent_array 2 .RepoNames]] \ No newline at end of file diff --git a/tools/config-generator/templates/testgrid_dashboardtab.yaml b/tools/config-generator/templates/testgrid_dashboardtab.yaml deleted file mode 100644 index e0cc8efad97..00000000000 --- a/tools/config-generator/templates/testgrid_dashboardtab.yaml +++ /dev/null @@ -1,4 +0,0 @@ - - name: [[.Name]] - test_group_name: [[.Base.TestGroupName]] - base_options: "[[.BaseOptions]]" - [[indent_map 4 .Extras]] diff --git a/tools/config-generator/templates/testgrid_testgroup.yaml b/tools/config-generator/templates/testgrid_testgroup.yaml deleted file mode 100644 index 728093c7d82..00000000000 --- a/tools/config-generator/templates/testgrid_testgroup.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- name: [[.Base.TestGroupName]] - gcs_prefix: [[.GcsLogDir]] - [[indent_map 2 .Extras]] \ No newline at end of file diff --git a/tools/config-generator/testgrid_config.go b/tools/config-generator/testgrid_config.go deleted file mode 100644 index ec045f25e53..00000000000 --- a/tools/config-generator/testgrid_config.go +++ /dev/null @@ -1,406 +0,0 @@ -/* -Copyright 2019 The Knative Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// data definitions that are used for the testgrid config file generation - -package main - -import ( - "fmt" - "regexp" - "strings" - "time" -) - -const ( - // baseOptions setting for testgrid dashboard tabs - testgridTabGroupByDir = "exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name=" - testgridTabGroupByTarget = "exclude-filter-by-regex=Overall$&group-by-target=&expand-groups=&sort-by-name=" - testgridTabSortByName = "sort-by-name=" - testgridTabSortByFailures = "sort-by-failures=" - - // generalTestgridConfig contains config-wide definitions. - generalTestgridConfig = "testgrid_config_header.yaml" - - // testGroupTemplate is the template for the test group config - testGroupTemplate = "testgrid_testgroup.yaml" - - // dashboardTabTemplate is the template for the dashboard tab config - dashboardTabTemplate = "testgrid_dashboardtab.yaml" - - // dashboardGroupTemplate is the template for the dashboard tab config - dashboardGroupTemplate = "testgrid_dashboardgroup.yaml" -) - -var ( - // goCoverageMap keep track of which repo has go code coverage when parsing the simple config file - goCoverageMap map[string]bool - - metaData = NewTestGridMetaData() - - // templatesCache caches templates in memory to avoid I/O - templatesCache = make(map[string]string) - quotedEmailPattern, _ = regexp.Compile("\"(.+@.+\\..+)\"") -) - -// baseTestgridTemplateData contains basic data about the testgrid config file. 
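All of the templates above use [[ ... ]] rather than Go's default {{ ... }} action delimiters, presumably so the emitted job YAML can carry literal braces without escaping. A minimal sketch of rendering with custom delimiters and a custom indent helper; indent_array here is a guess at the real helper's behavior, not its actual implementation:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// indent_array renders list items as YAML sequence entries,
		// continuing at the given indentation (assumed semantics).
		"indent_array": func(n int, items []string) string {
			pad := strings.Repeat(" ", n)
			quoted := make([]string, len(items))
			for i, it := range items {
				quoted[i] = `- "` + it + `"`
			}
			return strings.Join(quoted, "\n"+pad)
		},
	}
	tmpl := template.Must(template.New("job").
		Delims("[[", "]]"). // avoid clashing with literal {{ }} in the YAML
		Funcs(funcs).
		Parse("args:\n  [[indent_array 2 .Args]]\n"))
	data := struct{ Args []string }{[]string{"--run", "--verbose"}}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```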
-// TODO(chizhg): remove this structure and use baseProwJobTemplateData instead -type baseTestgridTemplateData struct { - ProwHost string - TestGridHost string - GubernatorHost string - TestGridGcsBucket string - TestGroupName string - Year int -} - -// testGroupTemplateData contains data about a test group -type testGroupTemplateData struct { - Base baseTestgridTemplateData - // TODO(chizhg): use baseProwJobTemplateData then this attribute can be removed - GcsLogDir string - Extras map[string]string -} - -// dashboardTabTemplateData contains data about a dashboard tab -type dashboardTabTemplateData struct { - Base baseTestgridTemplateData - Name string - BaseOptions string - Extras map[string]string -} - -// dashboardGroupTemplateData contains data about a dashboard group -type dashboardGroupTemplateData struct { - Name string - RepoNames []string -} - -// testgridEntityGenerator is a function that generates the entity given the repo name and job names -type testgridEntityGenerator func(string, string, []string) - -// newBaseTestgridTemplateData returns a testgridTemplateData type with its initial, default values. -func newBaseTestgridTemplateData(testGroupName string) baseTestgridTemplateData { - var data baseTestgridTemplateData - data.Year = time.Now().Year() - data.ProwHost = prowHost - data.TestGridHost = testGridHost - data.GubernatorHost = gubernatorHost - data.TestGridGcsBucket = testGridGcsBucket - data.TestGroupName = testGroupName - return data -} - -// Get returns the project JobDetailMap, creating it if necessary -func (t *TestGridMetaData) Get(projName string) JobDetailMap { - t.EnsureExists(projName) - return t.md[projName] -} - -func (t *TestGridMetaData) EnsureExists(projName string) bool { - if _, exists := t.md[projName]; !exists { - t.md[projName] = make(JobDetailMap) - if !strExists(t.projNames, projName) { - t.projNames = append(t.projNames, projName) - } - return false - } - return true -} - -func (t *TestGridMetaData) EnsureRepo(projName, repoName string) bool { - jdm := t.Get(projName) - if !jdm.EnsureExists(repoName) { - if !strExists(t.repoNames, repoName) { - t.repoNames = append(t.repoNames, repoName) - } - return false - } - return true -} - -// generateTestGridSection generates the configs for a TestGrid section using the given generator -func (t *TestGridMetaData) generateTestGridSection(sectionName string, generator testgridEntityGenerator, skipReleasedProj bool) { - oldCount := output.count - output.outputConfig(sectionName + ":") - for _, projName := range t.projNames { - // Do not handle the project if it is released and we want to skip it. - if skipReleasedProj && isReleased(projName) { - continue - } - repos := t.md[projName] - for _, repoName := range t.repoNames { - if jobNames, exists := repos[repoName]; exists { - generator(projName, repoName, jobNames) - } - } - } - // A TestGrid config cannot have an empty section, so add a bogus entry - // if nothing was generated, thus the config is semantically valid. 
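The comment just above, and the output.count check that follows it, deal with a TestGrid quirk: a section header with no entries is not a valid config, so the generator pads empty sections with a bogus entry. A tiny illustration of the same trick:

```go
package main

import (
	"fmt"
	"strings"
)

// writeSection pads an empty section with a placeholder entry, the same
// trick generateTestGridSection applies via its output.count check.
func writeSection(w *strings.Builder, name string, entries []string) {
	fmt.Fprintf(w, "%s:\n", name)
	if len(entries) == 0 {
		fmt.Fprintln(w, "- name: empty") // keeps the YAML semantically valid
		return
	}
	for _, e := range entries {
		fmt.Fprintf(w, "- name: %s\n", e)
	}
}

func main() {
	var b strings.Builder
	writeSection(&b, "dashboard_groups", nil)
	fmt.Print(b.String())
}
```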
- if output.count == oldCount { - output.outputConfig(baseIndent + "- name: empty") - } -} - -// generateNonAlignedTestGroups -func (t *TestGridMetaData) generateNonAlignedTestGroups() { - for _, tg := range t.nonAligned { - executeTestGroupTemplate(tg.CIJobName, getGcsLogDir(tg.CIJobName), tg.Extra) - } -} - -// -// testGroupName: This is the human-readable tab name -func (t *TestGridMetaData) AddNonAlignedTest(n NonAlignedTestGroup) { - t.nonAligned = append(t.nonAligned, n) -} - -// testGroupName: the name of the job in every case AFAICT -func getGcsLogDir(testGroupName string) string { - return fmt.Sprintf("%s/%s/%s", GCSBucket, LogsDir, testGroupName) -} - -func getTestgroupExtras(projName, jobName string) map[string]string { - extras := make(map[string]string) - switch jobName { - case "continuous": - // projName has the release encoded into it, so the main page at http://testgrid.knative.dev - // does not mix releases with the master/main branch - if releaseRegex.FindString(projName) != "" { - extras["num_failures_to_alert"] = "3" - extras["alert_options"] = "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"" - } else { - extras["alert_stale_results_hours"] = "3" - } - case "dot-release", "auto-release", "nightly": - extras["num_failures_to_alert"] = "1" - extras["alert_options"] = "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"" - if jobName == "dot-release" { - extras["alert_stale_results_hours"] = "170" // 1 week + 2h - } - case "test-coverage": - extras["short_text_metric"] = "coverage" - default: - extras["alert_stale_results_hours"] = "3" - } - return extras -} - -func generateProwJobAnnotations(dashboardName, tabName string, tgExtras map[string]string) []string { - annotations := []string{fmtDashboardAnnotation(dashboardName), fmtTabAnnotation(tabName)} - - v, ok := tgExtras["alert_stale_results_hours"] - if ok { - res := fmt.Sprintf(" testgrid-alert-stale-results-hours: \"%s\"", v) - annotations = append(annotations, res) - } - v, ok = tgExtras["short_text_metric"] - if ok { - res := " testgrid-in-cell-metric: " + v - annotations = append(annotations, res) - } - v, ok = tgExtras["alert_options"] - if ok { - email := quotedEmailPattern.FindStringSubmatch(v)[1] //index 1 is first capture group - res := fmt.Sprintf(" testgrid-alert-email: \"%s\"", email) - annotations = append(annotations, res) - } - v, ok = tgExtras["num_failures_to_alert"] - if ok { - res := fmt.Sprintf(" testgrid-num-failures-to-alert: \"%s\"", v) - annotations = append(annotations, res) - } - return annotations -} - -func fmtDashboardAnnotation(dashboardName string) string { - return fmt.Sprintf(" testgrid-dashboards: " + dashboardName) -} - -func fmtTabAnnotation(tabName string) string { - return fmt.Sprintf(" testgrid-tab-name: " + tabName) -} - -// generateTestGroup generates the test group configuration -func generateTestGroup(projName string, repoName string, jobNames []string) { - projRepoStr := buildProjRepoStr(projName, repoName) - for _, jobName := range jobNames { - testGroupName := getTestGroupName(projRepoStr, jobName) - testGroupNameForGCSLogDir := testGroupName - if jobName == "test-coverage" { - testGroupNameForGCSLogDir = fmt.Sprintf("ci-%s-%s", projRepoStr, "go-coverage") - } - gcsLogDir := getGcsLogDir(testGroupNameForGCSLogDir) - extras := getTestgroupExtras(projName, jobName) - executeTestGroupTemplate(testGroupName, gcsLogDir, extras) - } -} - -// executeTestGroupTemplate outputs the given test group config template with the given data -func 
executeTestGroupTemplate(testGroupName string, gcsLogDir string, extras map[string]string) { - var data testGroupTemplateData - data.Base.TestGroupName = testGroupName - data.GcsLogDir = gcsLogDir - data.Extras = extras - executeTemplate("test group", readTemplate(testGroupTemplate), data) -} - -// generateDashboard generates the dashboard configuration -func generateDashboard(projName string, repoName string, jobNames []string) { - projRepoStr := buildProjRepoStr(projName, repoName) - output.outputConfig("- name: " + strings.ToLower(repoName) + "\n" + baseIndent + "dashboard_tab:") - noExtras := make(map[string]string) - for _, jobName := range jobNames { - testGroupName := getTestGroupName(projRepoStr, jobName) - switch jobName { - case "continuous": - extras := make(map[string]string) - extras["num_failures_to_alert"] = "3" - extras["alert_options"] = "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"" - executeDashboardTabTemplate("continuous", testGroupName, testgridTabSortByName, extras) - // This is a special case for knative/serving, as conformance tab is just a filtered view of the continuous tab. - if projRepoStr == "knative-serving" { - executeDashboardTabTemplate("conformance", testGroupName, "include-filter-by-regex=test/conformance/&sort-by-name=", extras) - } - case "dot-release", "auto-release": - extras := make(map[string]string) - extras["num_failures_to_alert"] = "1" - extras["alert_options"] = "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"" - baseOptions := testgridTabSortByName - executeDashboardTabTemplate(jobName, testGroupName, baseOptions, extras) - case "nightly": - extras := make(map[string]string) - extras["num_failures_to_alert"] = "1" - extras["alert_options"] = "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"" - executeDashboardTabTemplate("nightly", testGroupName, testgridTabSortByName, extras) - case "test-coverage": - executeDashboardTabTemplate("coverage", testGroupName, testgridTabGroupByDir, noExtras) - default: - executeDashboardTabTemplate(jobName, testGroupName, testgridTabSortByName, noExtras) - } - } -} - -// executeTestGroupTemplate outputs the given dashboard tab config template with the given data -func executeDashboardTabTemplate(dashboardTabName string, testGroupName string, baseOptions string, extras map[string]string) { - var data dashboardTabTemplateData - data.Name = dashboardTabName - data.Base.TestGroupName = testGroupName - data.BaseOptions = baseOptions - data.Extras = extras - executeTemplate("dashboard tab", readTemplate(dashboardTabTemplate), data) -} - -// getTestGroupName get the testGroupName from the given repoName and jobName -func getTestGroupName(repoName string, jobName string) string { - switch jobName { - case "nightly": - return strings.ToLower(fmt.Sprintf("ci-%s-%s-release", repoName, jobName)) - default: - return strings.ToLower(fmt.Sprintf("ci-%s-%s", repoName, jobName)) - } -} - -// generateNonAlignedDashboards generates some of the content under "dashboards:" -func (t *TestGridMetaData) generateNonAlignedDashboards() { - // Collect them by DashboardName - var keys []string - dn := make(map[string][]NonAlignedTestGroup) - for _, tg := range t.nonAligned { - _, exists := dn[tg.DashboardName] - if !exists { - dn[tg.DashboardName] = make([]NonAlignedTestGroup, 0) - keys = append(keys, tg.DashboardName) - } - dn[tg.DashboardName] = append(dn[tg.DashboardName], tg) - } - for _, name := range keys { - tgs := dn[name] - output.outputConfig("- name: " + name + "\n" + 
baseIndent + "dashboard_tab:") - for _, tg := range tgs { - executeDashboardTabTemplate(tg.HumanTabName, tg.CIJobName, tg.BaseOptions, nil) - } - } -} - -// generateDashboardsForReleases generates some of the content under "dashboards:" -func (t *TestGridMetaData) generateDashboardsForReleases() { - for _, projName := range t.projNames { - // Do not handle the project if it is not released. - if !isReleased(projName) { - continue - } - repos := t.md[projName] - output.outputConfig("- name: " + projName + "\n" + baseIndent + "dashboard_tab:") - for _, repoName := range t.repoNames { - if jobNames, exists := repos[repoName]; exists { - for _, jobName := range jobNames { - extras := make(map[string]string) - extras["num_failures_to_alert"] = "3" - extras["alert_options"] = "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"" - testGroupName := getTestGroupName(buildProjRepoStr(projName, repoName), jobName) - executeDashboardTabTemplate(repoName+"-"+jobName, testGroupName, testgridTabSortByName, extras) - } - } - } - } -} - -// generateNonAlignedDashboardGroups generates some of the content under "dashboards:" -func (t *TestGridMetaData) generateNonAlignedDashboardGroups() { - // Collect Dashboards by DashboardGroup - var keys []string - dg := make(map[string][]string) - for _, tg := range t.nonAligned { - _, exists := dg[tg.DashboardGroup] - if !exists { - dg[tg.DashboardGroup] = make([]string, 0) - keys = append(keys, tg.DashboardGroup) - } - if !strExists(dg[tg.DashboardGroup], tg.DashboardName) { - dg[tg.DashboardGroup] = append(dg[tg.DashboardGroup], tg.DashboardName) - } - } - for _, group := range keys { - names := dg[group] - executeDashboardGroupTemplate(group, names) - } -} - -// generateDashboardGroups generates the stuff in dashboard_groups: -func (t *TestGridMetaData) generateDashboardGroups() { - output.outputConfig("dashboard_groups:") - for _, projName := range t.projNames { - // there is only one dashboard for each released project, so we do not need to group them - if isReleased(projName) { - continue - } - - dashboardRepoNames := make([]string, 0) - repos := t.md[projName] - for _, repoName := range t.repoNames { - if _, exists := repos[repoName]; exists { - dashboardRepoNames = append(dashboardRepoNames, repoName) - } - } - executeDashboardGroupTemplate(projName, dashboardRepoNames) - } -} - -// executeDashboardGroupTemplate outputs the given dashboard group config template with the given data -func executeDashboardGroupTemplate(dashboardGroupName string, dashboardRepoNames []string) { - var data dashboardGroupTemplateData - data.Name = dashboardGroupName - data.RepoNames = dashboardRepoNames - executeTemplate("dashboard group", readTemplate(dashboardGroupTemplate), data) -} diff --git a/tools/config-generator/testgrid_config_test.go b/tools/config-generator/testgrid_config_test.go deleted file mode 100644 index b69417621dd..00000000000 --- a/tools/config-generator/testgrid_config_test.go +++ /dev/null @@ -1,398 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
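generateProwJobAnnotations above is the glue between the two halves of this generator: the same "extras" that configure a TestGrid test group are also translated into testgrid-* annotations on the generated Prow job. A simplified, runnable echo of that mapping (only two of the keys, and omitting the alert_options email extraction):

```go
package main

import "fmt"

// annotations mirrors part of generateProwJobAnnotations: test group
// extras become testgrid-* annotations on the Prow job.
func annotations(dashboard, tab string, extras map[string]string) []string {
	anns := []string{
		"  testgrid-dashboards: " + dashboard,
		"  testgrid-tab-name: " + tab,
	}
	if v, ok := extras["alert_stale_results_hours"]; ok {
		anns = append(anns, fmt.Sprintf("  testgrid-alert-stale-results-hours: %q", v))
	}
	if v, ok := extras["num_failures_to_alert"]; ok {
		anns = append(anns, fmt.Sprintf("  testgrid-num-failures-to-alert: %q", v))
	}
	return anns
}

func main() {
	for _, a := range annotations("serving", "nightly", map[string]string{"num_failures_to_alert": "1"}) {
		fmt.Println(a)
	}
}
```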
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestNewBaseTestgridTemplateData(t *testing.T) { - SetupForTesting() - data := newBaseTestgridTemplateData("foo") - if diff := cmp.Diff(data.TestGroupName, "foo"); diff != "" { - t.Errorf("(-got +want)\n%s", diff) - } -} - -func TestTestGridMetaDataGet(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - jobDetails := data.Get("foo") - if diff := cmp.Diff(jobDetails, data.md["foo"]); diff != "" { - t.Errorf("(-got +want\n%s", diff) - } -} - -func TestTestGridMetaDataEnsureExists(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - out := data.EnsureExists("foo") - if out { - t.Errorf("foo did not exist but function returned true") - } - if _, exists := data.md["foo"]; !exists { - t.Errorf("foo should have been added but was not") - } - out = data.EnsureExists("foo") - if !out { - t.Errorf("foo existed but the function returned false") - } - if diff := cmp.Diff(data.projNames, []string{"foo"}); diff != "" { - t.Errorf("(-got +want\n%s", diff) - } -} - -func TestTestGridMetaDataEnsureRepo(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - out := data.EnsureRepo("proj-name", "repo-name") - if out { - t.Errorf("repo did not exist but function returned true") - } - if data.repoNames[0] != "repo-name" { - t.Errorf("Should have added repo-name but did not") - } - out = data.EnsureRepo("proj-name", "repo-name") - if !out { - t.Errorf("repo existed but function returned false") - } -} - -func TestTestGridMetaDataGenerateTestGridSection(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.projNames = []string{"project-a", "project-b"} - data.repoNames = []string{"repo-1", "repo-2", "repo-3"} - data.md["project-a"] = JobDetailMap{ - "repo-1": []string{"job-1a", "job-1b"}, - "repo-2": []string{"job-2a", "job-2b"}, - } - data.md["project-b"] = JobDetailMap{ - "repo-3": []string{"job-3a", "job-3b"}, - } - skipReleasedProj := false - outputs := []string{} - generator := func(proj, repo string, jobs []string) { - outputs = append(outputs, fmt.Sprintf("%s %s %v", proj, repo, jobs)) - } - data.generateTestGridSection("section-name", generator, skipReleasedProj) - expected := []string{ - "project-a repo-1 [job-1a job-1b]", - "project-a repo-2 [job-2a job-2b]", - "project-b repo-3 [job-3a job-3b]", - } - if diff := cmp.Diff(outputs, expected); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } -} - -func TestTestGridMetaDataGenerateNonAlignedTestGroups(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.nonAligned = []NonAlignedTestGroup{ - { - CIJobName: "ci-job-name", - Extra: map[string]string{}, - }, - } - data.generateNonAlignedTestGroups() - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestTestGridMetaDataAddNonAlignedTest(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.AddNonAlignedTest(NonAlignedTestGroup{}) - if len(data.nonAligned) != 1 { - t.Errorf("Test was not appended.") - } -} - -func TestGetGcsLogDir(t *testing.T) { - SetupForTesting() - GCSBucket = "gcs-bucket" - LogsDir = "logs-dir" - expected := "gcs-bucket/logs-dir/tg-name" - if diff := cmp.Diff(getGcsLogDir("tg-name"), expected); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } -} - -func TestGetTestgroupExtras(t *testing.T) 
{ - SetupForTesting() - defaultProjectName := "project-name" - tests := []struct { - ProjName string - JobName string - Expected map[string]string - }{ - { - ProjName: "proj-name-1.2.3", - JobName: "continuous", - Expected: map[string]string{ - "num_failures_to_alert": "3", - "alert_options": "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"", - }, - }, - { - JobName: "continuous", - Expected: map[string]string{ - "alert_stale_results_hours": "3", - }, - }, - { - JobName: "dot-release", - Expected: map[string]string{ - "num_failures_to_alert": "1", - "alert_options": "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"", - "alert_stale_results_hours": "170", - }, - }, - { - JobName: "auto-release", - Expected: map[string]string{ - "num_failures_to_alert": "1", - "alert_options": "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"", - }, - }, - { - JobName: "nightly", - Expected: map[string]string{ - "num_failures_to_alert": "1", - "alert_options": "\n alert_mail_to_addresses: \"serverless-engprod-sea@google.com\"", - }, - }, - { - JobName: "test-coverage", - Expected: map[string]string{ - "short_text_metric": "coverage", - }, - }, - { - JobName: "some-other-job-name", - Expected: map[string]string{"alert_stale_results_hours": "3"}, - }, - } - - for _, test := range tests { - projName := test.ProjName - if projName == "" { - projName = defaultProjectName - } - - out := getTestgroupExtras(test.ProjName, test.JobName) - if diff := cmp.Diff(out, test.Expected); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } - } -} - -func TestGenerateProwJobAnnotations(t *testing.T) { - SetupForTesting() - tgExtras := map[string]string{ - "alert_stale_results_hours": "48", - "alert_options": "\n alert_mail_to_addresses: \"foo-bar@google.com\"", - "num_failures_to_alert": "3", - "short_text_metric": "coverage", - } - expected := []string{ - " testgrid-dashboards: repo-name", - " testgrid-tab-name: job-name", - " testgrid-alert-stale-results-hours: \"48\"", - " testgrid-in-cell-metric: coverage", - " testgrid-alert-email: \"foo-bar@google.com\"", - " testgrid-num-failures-to-alert: \"3\"", - } - annotations := generateProwJobAnnotations("repo-name", "job-name", tgExtras) - if diff := cmp.Diff(annotations, expected); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } -} - -func TestFmtDashboardAnnotation(t *testing.T) { - if diff := cmp.Diff(fmtDashboardAnnotation("dashboardName"), " testgrid-dashboards: dashboardName"); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } -} - -func TestFmtTabAnnotation(t *testing.T) { - if diff := cmp.Diff(fmtTabAnnotation("tabName"), " testgrid-tab-name: tabName"); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } -} - -func TestTestGridMetaDataGenerateTestGroup(t *testing.T) { - SetupForTesting() - projName := "proj-name" - repoName := "repo-name" - jobNames := []string{"continuous", "dot-release", "webhook-api-coverage", "test-coverage", "default"} - generateTestGroup(projName, repoName, jobNames) - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestExecuteTestGroupTemplate(t *testing.T) { - SetupForTesting() - executeTestGroupTemplate("tg-name", "gcs-log-dir", map[string]string{}) - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestGenerateDashboard(t *testing.T) { - SetupForTesting() - projName := "proj-name" - repoName := "repo-name" - 
jobNames := []string{"continuous", "dot-release", "webhook-api-coverage", "nightly", "test-coverage", "default"} - generateDashboard(projName, repoName, jobNames) - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestExecuteDashboardTabTemplate(t *testing.T) { - SetupForTesting() - executeDashboardTabTemplate("tab-name", "tg-name", "base-opts", map[string]string{}) - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestGetTestGroupName(t *testing.T) { - SetupForTesting() - out := getTestGroupName("foo", "bar") - expected := "ci-foo-bar" - if diff := cmp.Diff(out, expected); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } - - out = getTestGroupName("foo", "nightly") - expected = "ci-foo-nightly-release" - if diff := cmp.Diff(out, expected); diff != "" { - t.Errorf("(-got +want): \n%s", diff) - } -} - -func TestGenerateNonAlignedDashboards(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.AddNonAlignedTest(NonAlignedTestGroup{ - DashboardName: "dashboard-name", - HumanTabName: "human-tab-name", - CIJobName: "ci-job-name", - BaseOptions: "base-opts", - }) - data.generateNonAlignedDashboards() - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestGenerateDashboardsForReleases(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.projNames = []string{"project-a", "project-b-2.0"} - data.repoNames = []string{"repo-1", "repo-2", "repo-3"} - data.md["project-a"] = JobDetailMap{ - "repo-1": []string{"job-1a", "job-1b"}, - "repo-2": []string{"job-2a", "job-2b"}, - } - data.md["project-b"] = JobDetailMap{ - "repo-3": []string{"job-3a", "job-3b"}, - } - data.generateDashboardsForReleases() - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestGenerateNonAlignedDashboardGroups(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.nonAligned = []NonAlignedTestGroup{ - { - DashboardName: "dashboard-name", - DashboardGroup: "dashboard-group", - }, - } - data.generateNonAlignedDashboardGroups() - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestGenerateDashboardGroups(t *testing.T) { - SetupForTesting() - data := NewTestGridMetaData() - data.projNames = []string{"project-a", "project-b-2.0"} - data.repoNames = []string{"repo-1", "repo-2", "repo-3"} - data.md["project-a"] = JobDetailMap{ - "repo-1": []string{"job-1a", "job-1b"}, - "repo-2": []string{"job-2a", "job-2b"}, - } - data.md["project-b"] = JobDetailMap{ - "repo-3": []string{"job-3a", "job-3b"}, - } - data.generateDashboardGroups() - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} - -func TestExecuteDashboardGroupTemplate(t *testing.T) { - SetupForTesting() - executeDashboardGroupTemplate("group-name", []string{"repo1", "repo2"}) - if len(GetOutput()) == 0 { - t.Errorf("No output") - } - if logFatalCalls != 0 { - t.Errorf("LogFatal was called.") - } -} diff --git a/tools/config-generator/testutils_test.go b/tools/config-generator/testutils_test.go deleted file mode 100644 index f58eb10f3e3..00000000000 --- a/tools/config-generator/testutils_test.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 
2020 The Knative Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "bytes" -) - -var outputBuffer bytes.Buffer - -// logFatalCalls tracks the number of logFatalf calls that occurred within a test -var logFatalCalls int - -func logFatalfMock(format string, v ...interface{}) { - logFatalCalls++ -} - -func ResetOutput() { - outputBuffer = bytes.Buffer{} - output = newOutputter(&outputBuffer) -} - -func GetOutput() string { - return outputBuffer.String() -} - -func SetupForTesting() { - ResetOutput() // Redirect output prior to each test. - logFatalf = logFatalfMock - logFatalCalls = 0 - sectionMap = make(map[string]bool) -} diff --git a/tools/config-generator/types.go b/tools/config-generator/types.go deleted file mode 100644 index 2226c96761b..00000000000 --- a/tools/config-generator/types.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - "regexp" -) - -var ( - goVersionMatcher *regexp.Regexp -) - -func init() { - goVersionMatcher = regexp.MustCompile(`go(\d+)[.](\d+)`) -} - -// jobDetailMap, key is the repo name, value is the list of job types, like continuous, nightly, etc., as well as custome names -type JobDetailMap map[string][]string - -// testGridMetaData saves the meta data needed to generate the final config file. 
-// key is the main project version, value is another map containing job details -type TestGridMetaData struct { - md map[string]JobDetailMap - // projNames save the proj names in a list when parsing the simple config file, for the purpose of maintaining the output sequence - projNames []string - // repoNames save the repo names in a list when parsing the simple config file, for the purpose of maintaining the output sequence - repoNames []string - nonAligned []NonAlignedTestGroup -} - -type NonAlignedTestGroup struct { - // DashboardGroup: The things shown at http://testgrid.knative.dev before you hover over anything - DashboardGroup string - // DashboardName: This is the thing with multiple tabs/test-groups/whatever-you-call-them - DashboardName string - // HumanTabName: Each set of test runs, aka test_group, with the name as shown to the human - HumanTabName string - // Used to find the logs - CIJobName string - // Becomes BaseOptions in the tab template, is something like "sort-by-failures=" - BaseOptions string - // Extra things that show up in yaml in the test_groups section - Extra map[string]string -} - -type GoVersion struct { - Major int - Minor int -} - -func (j JobDetailMap) Add(repo, jt string) { - j.EnsureExists(repo) - j[repo] = append(j[repo], jt) -} - -func NewJobDetailMap() JobDetailMap { - return make(JobDetailMap) -} - -// EnsureExists returns true if already existed or false if newly-created -func (j JobDetailMap) EnsureExists(repo string) bool { - if _, exists := j[repo]; exists == false { - j[repo] = make([]string, 0) - return false - } - return true -} - -func NewTestGridMetaData() TestGridMetaData { - return TestGridMetaData{ - md: make(map[string]JobDetailMap), - projNames: make([]string, 0), - repoNames: make([]string, 0), - nonAligned: make([]NonAlignedTestGroup, 0), - } -} - -func (v GoVersion) String() string { - return fmt.Sprintf("go%d.%d", v.Major, v.Minor) -} - -func (v GoVersion) Equals(v2 GoVersion) bool { - return v.Major == v2.Major && v.Minor == v2.Minor -} diff --git a/tools/config-generator/types_test.go b/tools/config-generator/types_test.go deleted file mode 100644 index 08e7bd20a78..00000000000 --- a/tools/config-generator/types_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import "testing" - -func TestJobDetailMap(t *testing.T) { - j := NewJobDetailMap() - - local := []string{"continuous", "nightly"} - - for _, t := range local { - j.Add("serving", t) - } - - for i := range local { - if j["serving"][i] == local[i] { - t.Logf("Entry %d matched", i) - } else { - t.Errorf("Entry %d did not match: %q != %q", i, j["serving"][i], local[i]) - } - } -} diff --git a/tools/config-generator/udpaterelease.go b/tools/config-generator/udpaterelease.go deleted file mode 100644 index 5ec607eb6e4..00000000000 --- a/tools/config-generator/udpaterelease.go +++ /dev/null @@ -1,222 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "sort" - "strconv" - "strings" - - "gopkg.in/yaml.v2" - "knative.dev/test-infra/pkg/ghutil" -) - -const ( - maxReleaseBranches = 4 -) - -func upgradeReleaseBranchesTemplate(configfileName string, gc ghutil.GithubOperations) error { - config := yaml.MapSlice{} - info, err := os.Lstat(configfileName) - if err != nil { - return fmt.Errorf("failed stats file %q: %w", configfileName, err) - } - content, err := ioutil.ReadFile(configfileName) - if err != nil { - return fmt.Errorf("cannot read file %q: %w", configfileName, err) - } - if err = yaml.Unmarshal(content, &config); err != nil { - return fmt.Errorf("cannot parse config %q: %w", configfileName, err) - } - for i, repos := range config { - if repos.Key != "presubmits" { - config[i].Value, err = getReposMap(gc, repos.Value) - if err != nil { - return err - } - } - } - - updated, err := yaml.Marshal(&config) - // This shouldn't happen, just catch it in case - if err != nil { - return fmt.Errorf("failed marshal modified content: %w", err) - } - return ioutil.WriteFile(configfileName, updated, info.Mode()) -} - -func getReposMap(gc ghutil.GithubOperations, val interface{}) (interface{}, error) { - reposMap := getMapSlice(val) - for j, repo := range reposMap { - var ( - ciBranches []string - releaseBranches []string - skipCiUpdate bool - skipReleaseUpdate bool - ) - repoName := getString(repo.Key) - latest, err := latestReleaseBranch(gc, repoName) - if err != nil { - return nil, fmt.Errorf("failed getting latest release branches: %w", err) - } - if latest == "" { - continue - } - - log.Printf("Latest branch for repo %q is %q", repoName, latest) - - repoConfigs := getInterfaceArray(repo.Value) - for _, repoConfig := range repoConfigs { - jobConfig := getMapSlice(repoConfig) - ciBranch, releaseBranch := getBranch(jobConfig) - if ciBranch != "" { - ciBranches = append(ciBranches, ciBranch) - if ciBranch == latest { - skipCiUpdate = true - } - } - if releaseBranch != "" { - releaseBranches = append(releaseBranches, releaseBranch) - if releaseBranch == latest { - skipReleaseUpdate = true - } - } - } - - if !skipCiUpdate && len(ciBranches) > 0 { - repoConfigs = updateConfigForJob(repoConfigs, ciBranches, latest, - func(jobConfig yaml.MapSlice) string { - branch, _ := getBranch(jobConfig) - return branch - }) - } - - if !skipReleaseUpdate && len(releaseBranches) > 0 { - repoConfigs = updateConfigForJob(repoConfigs, releaseBranches, latest, - func(jobConfig yaml.MapSlice) string { - _, branch := getBranch(jobConfig) - return branch - }) - } - - reposMap[j].Value = repoConfigs - } - return reposMap, nil -} - -func updateConfigForJob(repoConfigs []interface{}, branches []string, latest string, - getBranchForJob func(yaml.MapSlice) string) []interface{} { - - var oldestBranchToSupport = "0.0" - sortFunc(branches) - if len(branches) >= maxReleaseBranches-1 { - oldestBranchToSupport = branches[maxReleaseBranches-2] - } - var updatedRepoConfigs []interface{} - for _, repoConfig := range repoConfigs { - jobConfig := getMapSlice(repoConfig) - branch := getBranchForJob(jobConfig) - if branch == "" { - updatedRepoConfigs = append(updatedRepoConfigs, jobConfig) - continue - } - if versionComp(branch, oldestBranchToSupport) < 0 { - log.Printf("Skipping %q for %q", branch, oldestBranchToSupport) - continue - } - updatedRepoConfigs = append(updatedRepoConfigs, jobConfig) - if branch == branches[0] { - var next yaml.MapSlice - for _, item := range jobConfig { - val := item.Value - if item.Key == 
"release" { - val = latest - } - next = append(next, yaml.MapItem{Key: item.Key, Value: val}) - } - updatedRepoConfigs = append(updatedRepoConfigs, next) - } - } - - return updatedRepoConfigs -} - -func getBranch(jobConfig yaml.MapSlice) (ciBranch string, releaseBranch string) { - var ( - branch string - isBranchCi bool - isRelease bool - ) - for _, item := range jobConfig { - switch item.Key { - case "branch-ci": - isBranchCi = true - case "dot-release": - isRelease = true - case "release": - branch = getString(item.Value) - } - } - if branch == "" { - return - } - if isBranchCi { - ciBranch = branch - } else if isRelease { - releaseBranch = branch - } - - return -} - -func sortFunc(strSlice []string) { - sort.Slice(strSlice, func(i, j int) bool { - return versionComp(strSlice[i], strSlice[j]) > 0 - }) -} - -func versionComp(v1, v2 string) int { - leftMajor, leftMinor := majorMinor(v1) - rightMajor, rightMinor := majorMinor(v2) - if leftMajor != rightMajor { - return leftMajor - rightMajor - } - if leftMinor != rightMinor { - return leftMinor - rightMinor - } - return 0 -} - -func mustInt(s string) int { - r, err := strconv.Atoi(s) - if err != nil { - logFatalf("Failed to parse int %q: %v", s, err) - } - return r -} - -func majorMinor(s string) (int, int) { - parts := strings.Split(s, ".") - if len(parts) != 2 { - logFatalf("Version string has to be in the form of [MAJOR].[MINOR]: %q", s) - } - return mustInt(parts[0]), mustInt(parts[1]) -} diff --git a/tools/config-generator/unstructured/assertions.go b/tools/config-generator/unstructured/assertions.go deleted file mode 100644 index 8fe69a7f7fc..00000000000 --- a/tools/config-generator/unstructured/assertions.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unstructured - -import ( - "errors" - "fmt" - "reflect" -) - -// ErrAsserting when provided unstructured object doesn't match assertion. -var ErrAsserting = errors.New("asserting") - -// Assertion is a function that verifies unstructured object, and return error -// if found any problems with its structure. -type Assertion func(interface{}) error - -// Equals returns Assertion that checks if two unstructured are equal. -func Equals(want interface{}) Assertion { - return func(got interface{}) error { - if !reflect.DeepEqual(got, want) { - return fmt.Errorf("%w: %#v != %#v", ErrAsserting, - got, want) - } - return nil - } -} - -// EqualsStringSlice returns an Assertion that checks if unstructured slice -// equals given string slice. 
-func EqualsStringSlice(want []string) Assertion { - return func(val interface{}) error { - got, err := toStringSlice(val) - if err != nil { - return err - } - if !reflect.DeepEqual(got, want) { - return fmt.Errorf("%w: %#v != %#v", ErrAsserting, - got, want) - } - return nil - } -} - -func toStringSlice(val interface{}) ([]string, error) { - raw, ok := val.([]interface{}) - if !ok { - return nil, fmt.Errorf("%w: not a slice: %#v", ErrInvalidFormat, val) - } - strs, err := retypeSliceToStrings(raw) - if err != nil { - return nil, err - } - return strs, nil -} - -func retypeSliceToStrings(in []interface{}) ([]string, error) { - out := make([]string, len(in)) - for i, v := range in { - var ok bool - out[i], ok = v.(string) - if !ok { - return nil, fmt.Errorf("%w: not []string: %#v", ErrInvalidFormat, in) - } - } - return out, nil -} diff --git a/tools/config-generator/unstructured/digger.go b/tools/config-generator/unstructured/digger.go deleted file mode 100644 index a6329563db9..00000000000 --- a/tools/config-generator/unstructured/digger.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unstructured - -import ( - "errors" - "fmt" - "strconv" -) - -// ErrInvalidFormat when provided unstructured object has invalid format. -var ErrInvalidFormat = errors.New("invalid format") - -// Digger is a function that digs in unstructured object and returns some sub -// element of that object, or an error if such sub object can't be located. -type Digger func(interface{}) (interface{}, error) - -// MapKey returns a Digger that looks up the value of a key within the map. -func MapKey(key string) Digger { - return func(un interface{}) (interface{}, error) { - m, ok := un.(map[interface{}]interface{}) - if !ok { - return nil, fmt.Errorf("%w: not a map: %#v", ErrInvalidFormat, un) - } - val, ok := m[key] - if !ok { - return nil, fmt.Errorf("%w: no key %#v in map: %#v", - ErrInvalidFormat, key, un) - } - return val, nil - } -} - -// SliceElem returns a Digger that looks up the value of slice under provided index. 
-func SliceElem(idx int) Digger { - return func(un interface{}) (interface{}, error) { - s, ok := un.([]interface{}) - if !ok { - return nil, fmt.Errorf("%w: not a slice: %#v", ErrInvalidFormat, un) - } - if idx < 0 || idx >= len(s) { - return nil, fmt.Errorf( - "%w: index out of range [%d] for %#v", - ErrInvalidFormat, idx, s) - } - return s[idx], nil - } -} - -func toDiggers(queries []string) []Digger { - digrs := make([]Digger, len(queries)) - for i, query := range queries { - idx, err := strconv.Atoi(query) - var next Digger - if err != nil { - next = MapKey(query) - } else { - next = SliceElem(idx) - } - digrs[i] = next - } - return digrs -} diff --git a/tools/config-generator/unstructured/questioner.go b/tools/config-generator/unstructured/questioner.go deleted file mode 100644 index 0b5770a2bcc..00000000000 --- a/tools/config-generator/unstructured/questioner.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unstructured - -import "strings" - -// Questioner can be used to look up sub elements of unstructured objects, like -// those created by yaml.Unmarshal or json.Unmarshal. -type Questioner interface { - // Query will look up sub element, by provided query string. The query string - // is in format of dot-separated queries like: "foo.bar.42.fizz". In given - // example, we will be searching for: map value of key "foo", then map value - // of key "bar", then slice value of index 42, and so on. - Query(query string) (interface{}, error) - // Dig will look up sub element, by provided list of Digger's. - Dig(diggers []Digger) (interface{}, error) -} - -// NewQuestioner creates new Questioner object. -func NewQuestioner(un interface{}) Questioner { - return &defaultQuestioner{un: un} -} - -type defaultQuestioner struct { - un interface{} -} - -func (d defaultQuestioner) Query(query string) (interface{}, error) { - digrs := toDiggers(strings.Split(query, ".")) - return d.Dig(digrs) -} - -func (d defaultQuestioner) Dig(diggers []Digger) (interface{}, error) { - var err error - un := d.un - for _, dig := range diggers { - un, err = dig(un) - if err != nil { - return nil, err - } - } - return un, nil -} diff --git a/tools/config-generator/unstructured/questioner_test.go b/tools/config-generator/unstructured/questioner_test.go deleted file mode 100644 index 27c620cf34b..00000000000 --- a/tools/config-generator/unstructured/questioner_test.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unstructured_test - -import ( - "errors" - "fmt" - "testing" - - "gopkg.in/yaml.v2" - "knative.dev/test-infra/tools/config-generator/unstructured" -) - -func TestQuestioner(t *testing.T) { - tests := []struct { - query string - assert unstructured.Assertion - want error - }{{ - query: "foo.bar.0.fizz", - assert: unstructured.EqualsStringSlice([]string{"alpha", "beta", "gamma"}), - }, { - query: "foo.bar.0.fizz", - assert: unstructured.EqualsStringSlice([]string{"alpha", "beta"}), - want: unstructured.ErrAsserting, - }, { - query: "foo.bar.0.bazz", - assert: unstructured.Equals(true), - }, { - query: "foo.bar.0.bazz", - assert: unstructured.Equals("yellow"), - want: unstructured.ErrAsserting, - }, { - query: "foo.bar.0.bazz", - assert: unstructured.EqualsStringSlice([]string{"alpha", "beta"}), - want: unstructured.ErrInvalidFormat, - }, { - query: "bla.dada", - want: unstructured.ErrInvalidFormat, - }, { - query: "foo.bar.42.bazz", - want: unstructured.ErrInvalidFormat, - }, { - query: "foo.42.bazz", - want: unstructured.ErrInvalidFormat, - }, { - query: "foo.bla", - want: unstructured.ErrInvalidFormat, - }, { - query: "foo.bar.bla", - want: unstructured.ErrInvalidFormat, - }, { - query: "foo.bar", - assert: unstructured.EqualsStringSlice([]string{"alpha", "beta", "gamma"}), - want: unstructured.ErrInvalidFormat, - }} - for i, tc := range tests { - tc := tc - t.Run(fmt.Sprintf("%d-%s", i, tc.query), func(t *testing.T) { - err := testQuestionerQuery(t, tc.query, tc.assert) - checkErr(t, err, tc.want) - }) - } -} - -func exampleUnstructured(tb testing.TB) interface{} { - tb.Helper() - un := make(map[interface{}]interface{}) - err := yaml.Unmarshal([]byte(`--- -foo: - bar: - - fizz: - - alpha - - beta - - gamma - bazz: true -`), &un) - if err != nil { - tb.Fatal(err) - } - return un -} - -func testQuestionerQuery(tb testing.TB, query string, assert unstructured.Assertion) error { - tb.Helper() - questioner := unstructured.NewQuestioner(exampleUnstructured(tb)) - val, err := questioner.Query(query) - if err != nil { - return err - } - if assert != nil { - return assert(val) - } - return nil -} - -func checkErr(tb testing.TB, got, want error) { - tb.Helper() - if want == nil { - if got != nil { - tb.Fatal(got) - } - } - if !errors.Is(got, want) { - tb.Fatalf("got: %#v, want: %#v", got, want) - } -} diff --git a/tools/config-generator/updaterelease_test.go b/tools/config-generator/updaterelease_test.go deleted file mode 100644 index cd3d5f06e39..00000000000 --- a/tools/config-generator/updaterelease_test.go +++ /dev/null @@ -1,277 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "errors" - "io/ioutil" - "os" - "syscall" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-github/v32/github" - "gopkg.in/yaml.v2" - "knative.dev/test-infra/pkg/ghutil/fakeghutil" -) - -var ( - // errUnwrappable: Some errors not wrappable - errUnwrappable = errors.New("unwrappable") - latest = "release-0.6" -) - -func TestUpgradeReleaseBranchesTemplate(t *testing.T) { - tests := []struct { - name string - fileExist bool - in string - want string - wantErr error - }{ - { - "Change", - true, - `periodics: - org1/repo1: - - branch-ci: true - release: "0.5"`, - `periodics: - org1/repo1: - - branch-ci: true - release: "0.5" - - branch-ci: true - release: "0.6" -`, - nil, - }, { - "No_op", - true, - `periodics: - org1/repo1: - - branch-ci: true - release: "0.6"`, - `periodics: - org1/repo1: - - branch-ci: true - release: "0.6" -`, - nil, - }, { - "File_not_exit", - false, - `doesnt matter`, - `doesnt matter`, - syscall.ENOENT, // os.PathError is not usable, use syscall instead - }, { - "Not marshallable", - true, - `doesnt matter`, - `doesnt matter`, - errUnwrappable, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fgc := fakeghutil.NewFakeGithubClient() - fgc.Branches = make(map[string][]*github.Branch) - fgc.Branches["org1/repo1"] = []*github.Branch{ - {Name: &latest}, - } - var fn string - fn = "file_not_exist" - if tt.fileExist { - fi, err := ioutil.TempFile(os.TempDir(), "TestUpgradeReleaseBranchesTemplate") - if err == nil { - fn = fi.Name() - err = ioutil.WriteFile(fi.Name(), []byte(tt.in), 0644) - } - if err != nil { - t.Fatalf("Failed creating temp file: %v", err) - } - t.Logf("Temp file created at %q", fi.Name()) - } - err := upgradeReleaseBranchesTemplate(fn, fgc) - if !errors.Is(err, tt.wantErr) && (err != nil && tt.wantErr != errUnwrappable) { - t.Fatalf("Error not expected. Want: '%v', got: '%v'", tt.wantErr, err) - } - if !tt.fileExist { - return - } - gotBytes, err := ioutil.ReadFile(fn) - if !errors.Is(err, tt.wantErr) && (err != nil && tt.wantErr != errUnwrappable) { - t.Fatalf("Error not expected. 
Want: '%v', got: '%v'", tt.wantErr, err) - } - got := string(gotBytes) - if diff := cmp.Diff(tt.want, got); diff != "" { - t.Fatalf("Mismatch, got(+), want(-): \n%s", diff) - } - }) - } -} - -func TestGetReposMap(t *testing.T) { - tests := []struct { - name string - in string - want string - }{ - { - "Simple_update_case", - `org1/repo1: -- branch-ci: true - release: "0.5"`, - `org1/repo1: -- branch-ci: true - release: "0.5" -- branch-ci: true - release: "0.6" -`, - }, { - "Simple_update_case2", - `org1/repo1: -- branch-ci: true - release: "0.1"`, - `org1/repo1: -- branch-ci: true - release: "0.1" -- branch-ci: true - release: "0.6" -`, - }, { - "Simple_update_case3", - `org1/repo1: -- branch-ci: true - release: "0.1" -- branch-ci: true - release: "0.3"`, - `org1/repo1: -- branch-ci: true - release: "0.1" -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.6" -`, - }, { - "Simple_update_case4", - `org1/repo1: -- dot-release: true - release: "0.5"`, - `org1/repo1: -- dot-release: true - release: "0.5" -- dot-release: true - release: "0.6" -`, - }, { - "Delete_old_branches", - `org1/repo1: -- branch-ci: true - release: "0.2" -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.4" -- branch-ci: true - release: "0.5"`, - `org1/repo1: -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.4" -- branch-ci: true - release: "0.5" -- branch-ci: true - release: "0.6" -`, - }, { - "No_op", - `org1/repo1: -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.4" -- branch-ci: true - release: "0.5" -- branch-ci: true - release: "0.6"`, - `org1/repo1: -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.4" -- branch-ci: true - release: "0.5" -- branch-ci: true - release: "0.6" -`, - }, { - "No_delete", - `org1/repo1: -- branch-ci: true - release: "0.2" -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.4" -- branch-ci: true - release: "0.5" -- branch-ci: true - release: "0.6"`, - `org1/repo1: -- branch-ci: true - release: "0.2" -- branch-ci: true - release: "0.3" -- branch-ci: true - release: "0.4" -- branch-ci: true - release: "0.5" -- branch-ci: true - release: "0.6" -`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fgc := fakeghutil.NewFakeGithubClient() - fgc.Branches = make(map[string][]*github.Branch) - fgc.Branches["org1/repo1"] = []*github.Branch{ - {Name: &latest}, - } - inStruct := yaml.MapSlice{} - if err := yaml.Unmarshal([]byte(tt.in), &inStruct); err != nil { - t.Fatalf("Failed unmarshal %q: %v", tt.in, err) - } - gotStruct, err := getReposMap(fgc, inStruct) - if err != nil { - t.Fatalf("Failed get repos map: %v", err) - } - gotBytes, err := yaml.Marshal(gotStruct) - if err != nil { - t.Fatalf("Failed marshal: %v", err) - } - got := string(gotBytes) - if diff := cmp.Diff(tt.want, got); diff != "" { - t.Fatalf("Mismatch, got(+), want(-): \n%s", diff) - } - }) - } -} diff --git a/tools/config-generator/utils.go b/tools/config-generator/utils.go deleted file mode 100644 index cfe2e42a358..00000000000 --- a/tools/config-generator/utils.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "sort" - "strconv" - "strings" - - "gopkg.in/yaml.v2" -) - -// getString casts the given interface (expected string) as string. -// An array of length 1 is also considered a single string. -func getString(s interface{}) string { - if _, ok := s.([]interface{}); ok { - values := getStringArray(s) - if len(values) == 1 { - return values[0] - } - logFatalf("Entry %v is not a string or string array of size 1", s) - } - if str, ok := s.(string); ok { - return str - } - logFatalf("Entry %v is not a string", s) - return "" -} - -// getInt casts the given interface (expected int) as int. -func getInt(s interface{}) int { - if value, ok := s.(int); ok { - return value - } - logFatalf("Entry %v is not an integer", s) - return 0 -} - -// getBool casts the given interface (expected bool) as bool. -func getBool(s interface{}) bool { - if value, ok := s.(bool); ok { - return value - } - logFatalf("Entry %v is not a boolean", s) - return false -} - -// getInterfaceArray casts the given interface (expected interface array) as interface array. -func getInterfaceArray(s interface{}) []interface{} { - if interfaceArray, ok := s.([]interface{}); ok { - return interfaceArray - } - logFatalf("Entry %v is not an interface array", s) - return nil -} - -// getStringArray casts the given interface (expected string array) as string array. -func getStringArray(s interface{}) []string { - interfaceArray := getInterfaceArray(s) - strArray := make([]string, len(interfaceArray)) - for i := range interfaceArray { - strArray[i] = getString(interfaceArray[i]) - } - return strArray -} - -// getMapSlice casts the given interface (expected MapSlice) as MapSlice. -func getMapSlice(m interface{}) yaml.MapSlice { - if mm, ok := m.(yaml.MapSlice); ok { - return mm - } - logFatalf("Entry %v is not a yaml.MapSlice", m) - return nil -} - -// appendIfUnique appends an element to an array of strings, unless it's already present. -func appendIfUnique(a1 []string, e2 string) []string { - var res []string - res = append(res, a1...) - for _, e1 := range a1 { - if e1 == e2 { - return res - } - } - return append(res, e2) -} - -// isNum checks if the given string is a valid number -func isNum(s string) bool { - _, err := strconv.ParseFloat(s, 64) - return err == nil -} - -// quote returns the given string quoted if it's not a number, or not a key/value pair, or already quoted. -func quote(s string) string { - if isNum(s) { - return s - } - if strings.HasPrefix(s, "'") || strings.HasPrefix(s, "\"") || strings.Contains(s, ": ") || strings.HasSuffix(s, ":") { - return s - } - return "\"" + s + "\"" -} - -// indentBase is a helper function which returns the given array indented. -func indentBase(indentation int, prefix string, indentFirstLine bool, array []string) string { - s := "" - if len(array) == 0 { - return s - } - indent := strings.Repeat(" ", indentation) - for i := 0; i < len(array); i++ { - if i > 0 || indentFirstLine { - s += indent - } - s += prefix + quote(array[i]) + "\n" - } - return s -} - -// indentArray returns the given array indented, prefixed by "-". 
-func indentArray(indentation int, array []string) string { - return indentBase(indentation, "- ", false, array) -} - -// indentKeys returns the given array of key/value pairs indented. -func indentKeys(indentation int, array []string) string { - return indentBase(indentation, "", false, array) -} - -// indentSectionBase is a helper function which returns the given array of key/value pairs indented inside a section. -func indentSectionBase(indentation int, title string, prefix string, array []string) string { - keys := indentBase(indentation, prefix, true, array) - if keys == "" { - return keys - } - return title + ":\n" + keys -} - -// indentArraySection returns the given array indented inside a section. -func indentArraySection(indentation int, title string, array []string) string { - return indentSectionBase(indentation, title, "- ", array) -} - -// indentSection returns the given array of key/value pairs indented inside a section. -func indentSection(indentation int, title string, array []string) string { - return indentSectionBase(indentation, title, "", array) -} - -// indentMap returns the given map indented, with each key/value separated by ": " -func indentMap(indentation int, mp map[string]string) string { - // Extract map keys to keep order consistent. - keys := make([]string, 0, len(mp)) - for key := range mp { - keys = append(keys, key) - } - sort.Strings(keys) - arr := make([]string, len(mp)) - for i := 0; i < len(mp); i++ { - arr[i] = keys[i] + ": " + quote(mp[keys[i]]) - } - return indentBase(indentation, "", false, arr) -} - -// strExists checks if the given string exists in the array -func strExists(arr []string, str string) bool { - for _, s := range arr { - if str == s { - return true - } - } - return false -} diff --git a/tools/config-generator/utils_test.go b/tools/config-generator/utils_test.go deleted file mode 100644 index 021617f30df..00000000000 --- a/tools/config-generator/utils_test.go +++ /dev/null @@ -1,333 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "gopkg.in/yaml.v2" -) - -func TestGetString(t *testing.T) { - SetupForTesting() - var in interface{} = "abcdefg" - out := getString(in) - if diff := cmp.Diff(out, "abcdefg"); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } - if logFatalCalls != 0 { - t.Fatalf("logFatal was called for %v", in) - } - - out = getString(42) - if logFatalCalls != 1 { - t.Fatalf("logFatal was not called for %v", in) - } -} - -func TestGetInt(t *testing.T) { - SetupForTesting() - var in interface{} = 123 - out := getInt(in) - if logFatalCalls != 0 { - t.Fatalf("logFatal was called for %v", in) - } - if out != 123 { - t.Fatalf("Expected 123, got %v", out) - } - - getInt("abc") - if logFatalCalls == 0 { - t.Fatalf("Expected logFatal to be called") - } -} - -func TestGetBool(t *testing.T) { - SetupForTesting() - var in interface{} = true - out := getBool(in) - if logFatalCalls != 0 { - t.Fatalf("logFatal was called for %v", in) - } - if !out { - t.Fatalf("Expected true, got %v", out) - } - - getBool(123) - if logFatalCalls == 0 { - t.Fatalf("Expected logFatal to be called") - } -} - -func TestGetInterfaceArray(t *testing.T) { - SetupForTesting() - in1 := []interface{}{"foo", "bar", "baz"} - out1 := getInterfaceArray(in1) - if fmt.Sprint(in1) != fmt.Sprint(out1) { - t.Fatalf("Did not get same interface slice back.") - } - if logFatalCalls != 0 { - t.Fatalf("Interface slice caused logFatal call") - } - - in2 := []string{"foo", "bar", "baz"} - getInterfaceArray(in2) - if logFatalCalls != 1 { - t.Fatalf("Non interface slice should have caused logFatal call") - } -} - -func TestGetStringArray(t *testing.T) { - SetupForTesting() - in := []interface{}{"foo", "bar", "baz"} - out := getStringArray(in) - if logFatalCalls != 0 { - t.Fatalf("Input %v should not have caused logFatal call.", in) - } - if fmt.Sprint(out) != fmt.Sprint(in) { - t.Fatalf("Expected input %v and output %v to have identical string output.", in, out) - } -} - -func TestGetMapSlice(t *testing.T) { - SetupForTesting() - var in interface{} = yaml.MapSlice{ - yaml.MapItem{Key: "abc", Value: 123}, - yaml.MapItem{Key: "def", Value: 456}, - } - out := getMapSlice(in) - if logFatalCalls != 0 { - t.Fatalf("Input %v should not have caused logFatal call.", in) - } - if fmt.Sprint(out) != fmt.Sprint(in) { - t.Fatalf("Expected input %v and output %v to have identical string output.", in, out) - } -} - -func TestAppendIfUnique(t *testing.T) { - SetupForTesting() - arr := []string{"foo", "bar"} - arr = appendIfUnique(arr, "foo") - if len(arr) != 2 { - t.Fatalf("Expected length 2 but was %v", len(arr)) - } - arr = appendIfUnique(arr, "baz") - if arr[2] != "baz" { - t.Fatalf("Expected 'baz' to be appended but wasn't.") - } -} - -func TestIsNum(t *testing.T) { - SetupForTesting() - nums := []string{"-123456.789", "-123", "0", "0.0", ".0", "123", "123456.789"} - for _, n := range nums { - if !isNum(n) { - t.Fatalf("Input %v should be a num, but wasn't.", n) - } - } - notNums := []string{"", ".", "abc", "123 "} - for _, n := range notNums { - if isNum(n) { - t.Fatalf("Input %v should not be a num, but was identified as one.", n) - } - } -} - -func TestQuote(t *testing.T) { - SetupForTesting() - tests := []struct { - in string - expectQuotes bool - }{ - {"foo bar baz", true}, - {"", true}, - {"\"foo bar\"", false}, - {"'foo bar'", false}, - {"123", false}, - {"abc:def", true}, // Not recognized as a key value pair without space after colon - {"abc: def", false}, 
- {"abc:", false}, - } - for _, test := range tests { - out := quote(test.in) - quoted := "\"" + test.in + "\"" - if test.expectQuotes && out != "\""+test.in+"\"" { - t.Fatalf("Expected %v, got %v", quoted, out) - } - if !test.expectQuotes && test.in != out { - t.Fatalf("Expected %v, got %v", test.in, out) - } - } -} - -func TestIndentBase(t *testing.T) { - SetupForTesting() - tests := []struct { - input []string - indentation int - prefix string - indentFirstLine bool - expected string - }{ - { - input: []string{"foo", "bar", "baz"}, - indentation: 2, - prefix: "", - indentFirstLine: false, - expected: fmt.Sprintf("%q\n %q\n %q\n", "foo", "bar", "baz"), - }, - { - input: []string{"foo", "bar", "baz"}, - indentation: 0, - prefix: "", - indentFirstLine: false, - expected: fmt.Sprintf("%q\n%q\n%q\n", "foo", "bar", "baz"), - }, - { - input: []string{"foo", "bar", "baz"}, - indentation: 2, - prefix: "", - indentFirstLine: true, - expected: fmt.Sprintf(" %q\n %q\n %q\n", "foo", "bar", "baz"), - }, - { - input: []string{"foo", "bar", "baz"}, - indentation: 2, - prefix: "__", - indentFirstLine: false, - expected: fmt.Sprintf("__%q\n __%q\n __%q\n", "foo", "bar", "baz"), - }, - } - for _, test := range tests { - out := indentBase( - test.indentation, - test.prefix, - test.indentFirstLine, - test.input) - if diff := cmp.Diff(out, test.expected); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } - } -} - -func TestIndentArray(t *testing.T) { - SetupForTesting() - input := []string{"'foo'", "42", "key: value", "bar"} - indentation := 2 - expected := "- 'foo'\n - 42\n - key: value\n - \"bar\"\n" - - if diff := cmp.Diff(indentArray(indentation, input), expected); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } -} - -func TestIndentKeys(t *testing.T) { - SetupForTesting() - input := []string{"abc: def", "foo: bar"} - indentation := 2 - expected := "abc: def\n foo: bar\n" - - if diff := cmp.Diff(indentKeys(indentation, input), expected); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } -} - -func TestIndentSectionBase(t *testing.T) { - SetupForTesting() - indentation := 2 - title := "foo" - prefix := "__" - input := []string{"abc: def", "bar", "42"} - expected := "foo:\n __abc: def\n __\"bar\"\n __42\n" - - out := indentSectionBase(indentation, title, prefix, input) - if diff := cmp.Diff(out, expected); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } - - out = indentSectionBase(indentation, title, prefix, []string{}) - if diff := cmp.Diff(out, ""); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } -} - -func TestIndentArraySection(t *testing.T) { - SetupForTesting() - indentation := 2 - title := "foo" - input := []string{"abc: def", "bar", "42"} - expected := "foo:\n - abc: def\n - \"bar\"\n - 42\n" - - out := indentArraySection(indentation, title, input) - if diff := cmp.Diff(out, expected); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } - - out = indentArraySection(indentation, title, []string{}) - if diff := cmp.Diff(out, ""); diff != "" { - t.Fatalf("Unexpected output (-got +want):\n%s", diff) - } -} - -func TestIndentSection(t *testing.T) { - SetupForTesting() - indentation := 2 - title := "foo" - input := []string{"abc: def", "bar: baz", "magic_num: 42"} - expected := "foo:\n abc: def\n bar: baz\n magic_num: 42\n" - - out := indentSection(indentation, title, input) - if diff := cmp.Diff(out, expected); diff != "" { - t.Fatalf("Unexpected output (-got 
+want):\n%s", diff)
-	}
-
-	out = indentSection(indentation, title, []string{})
-	if diff := cmp.Diff(out, ""); diff != "" {
-		t.Fatalf("Unexpected output (-got +want):\n%s", diff)
-	}
-}
-
-func TestIndentMap(t *testing.T) {
-	SetupForTesting()
-	indentation := 2
-	input := map[string]string{
-		"foo": "bar",
-		"abc": "def",
-		"num": "42",
-	}
-	expected := "abc: \"def\"\n  foo: \"bar\"\n  num: 42\n"
-
-	out := indentMap(indentation, input)
-	if diff := cmp.Diff(out, expected); diff != "" {
-		t.Fatalf("Unexpected output (-got +want):\n%s", diff)
-	}
-}
-
-func TestStrExists(t *testing.T) {
-	SetupForTesting()
-	sArray := []string{"foo", "bar", "baz"}
-
-	if strExists(sArray, "abc") {
-		t.Fatalf("String abc should not exist in %v", sArray)
-	}
-
-	if !strExists(sArray, "bar") {
-		t.Fatalf("String bar should exist in %v", sArray)
-	}
-}
diff --git a/tools/configgen/README.md b/tools/configgen/README.md
new file mode 100644
index 00000000000..8babc5b175d
--- /dev/null
+++ b/tools/configgen/README.md
@@ -0,0 +1,27 @@
+# README
+
+`configgen` is a tool for generating Prow and TestGrid config files for Knative
+projects.
+
+## Prow configgen
+
+The Prow configgen part is based on [istio
+prowgen](https://github.com/istio/test-infra/tree/master/tools/prowgen). It does
+the following:
+
+1. Adds annotations that can be used by [TestGrid
+   configurator](https://github.com/kubernetes/test-infra/tree/master/testgrid/cmd/configurator)
+   for generating the TestGrid config file.
+
+1. Calculates and adds schedules for periodic Prow jobs, distributing the
+   workloads evenly to avoid overloading Prow.
+
+1. Uses [istio
+   prowgen](https://github.com/istio/test-infra/tree/master/tools/prowgen) to
+   generate the Prow config files.
+
+## TestGrid configgen
+
+The TestGrid configgen part generates the TestGrid config file that is used by
+[TestGrid configurator](https://github.com/kubernetes/test-infra/tree/master/testgrid/cmd/configurator)
+to configure [testgrid.knative.dev](https://testgrid.knative.dev).
diff --git a/tools/configgen/main.go b/tools/configgen/main.go
new file mode 100644
index 00000000000..62396b0d8b8
--- /dev/null
+++ b/tools/configgen/main.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2022 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package main + +import ( + "flag" + "log" + + "knative.dev/test-infra/tools/configgen/pkg" +) + +var ( + prowJobsConfigInput string + prowJobsConfigOutput string + allProwJobsConfig string + testgridConfigOutput string +) + +func main() { + flag.StringVar(&prowJobsConfigInput, "prow-jobs-config-input", "", "The input path for the prow jobs config") + flag.StringVar(&prowJobsConfigOutput, "prow-jobs-config-output", "", "The output path for the prow jobs config") + flag.StringVar(&allProwJobsConfig, "all-prow-jobs-config", "", "The path for all prow jobs config") + flag.StringVar(&testgridConfigOutput, "testgrid-config-output", "", "The output path for the testgrid config") + + flag.Parse() + if prowJobsConfigInput == "" { + log.Fatal("--prow-jobs-config-input must be specified") + } + if prowJobsConfigOutput == "" { + log.Fatal("--prow-jobs-config-output must be specified") + } + if allProwJobsConfig == "" { + log.Fatal("--all-prow-jobs-config must be specified") + } + if testgridConfigOutput == "" { + log.Fatal("--testgrid-config-output must be specified") + } + + if err := pkg.GenerateProwJobsConfig(prowJobsConfigInput, prowJobsConfigOutput); err != nil { + log.Fatalf("Error generating Prow jobs: %v", err) + } + + if err := pkg.GenerateTestGridConfig(allProwJobsConfig, testgridConfigOutput); err != nil { + log.Fatalf("Error generating TestGrid config: %v", err) + } +} diff --git a/tools/configgen/pkg/annotation.go b/tools/configgen/pkg/annotation.go new file mode 100644 index 00000000000..284374a714e --- /dev/null +++ b/tools/configgen/pkg/annotation.go @@ -0,0 +1,45 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import "istio.io/test-infra/tools/prowgen/pkg/spec" + +const ( + testgridDashboardAnnotation = "testgrid-dashboards" + testgridDashboardTabAnnoation = "testgrid-tab-name" +) + +// addAnnotations adds extra annotations for generating TestGrid config. +func addAnnotations(jobsConfig spec.JobsConfig) spec.JobsConfig { + for i, job := range jobsConfig.Jobs { + if hasPeriodic(job.Types) { + if job.Annotations == nil { + job.Annotations = map[string]string{} + } + + if jobsConfig.Branches[0] == "main" { + job.Annotations[testgridDashboardAnnotation] = jobsConfig.Repo + job.Annotations[testgridDashboardTabAnnoation] = job.Name + } else { + job.Annotations[testgridDashboardAnnotation] = jobsConfig.Org + "-" + jobsConfig.Branches[0] + job.Annotations[testgridDashboardTabAnnoation] = jobsConfig.Repo + "-" + job.Name + } + } + jobsConfig.Jobs[i] = job + } + return jobsConfig +} diff --git a/tools/configgen/pkg/prow.go b/tools/configgen/pkg/prow.go new file mode 100644 index 00000000000..add131d9b2e --- /dev/null +++ b/tools/configgen/pkg/prow.go @@ -0,0 +1,68 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + prowgenpkg "istio.io/test-infra/tools/prowgen/pkg" +) + +// GenerateProwJobsConfig will generate Prow jobs from prowJobsConfigInput, and write +// them to prowJobsConfigOutput. +func GenerateProwJobsConfig(prowJobsConfigInput, prowJobsConfigOutput string) error { + + bc := prowgenpkg.ReadBase(nil, filepath.Join(prowJobsConfigInput, ".base.yaml")) + cli := &prowgenpkg.Client{ + BaseConfig: bc, + LongJobNamesAllowed: true, + } + + if err := filepath.WalkDir(prowJobsConfigInput, func(path string, d os.DirEntry, err error) error { + log.Printf("Generating Prow jobs for %q", path) + // Skip directory, base config file and other unrelated files. + if d.IsDir() || d.Name() == ".base.yaml" || !strings.HasSuffix(path, ".yaml") { + return nil + } + + jobsConfig := cli.ReadJobsConfig(path) + jobsConfig = addSchedule(jobsConfig) + jobsConfig = addAnnotations(jobsConfig) + output, err := cli.ConvertJobConfig(path, jobsConfig, jobsConfig.Branches[0]) + if err != nil { + return fmt.Errorf("error generating Prow jobs config for %q: %w", path, err) + } + + outputFile := filepath.Join(prowJobsConfigOutput, + fmt.Sprintf("%s/%s-%s.gen.yaml", jobsConfig.Org, jobsConfig.Repo, jobsConfig.Branches[0])) + log.Printf("Writing the generated Prow config to %q", outputFile) + if err := prowgenpkg.Write(output, outputFile, bc.AutogenHeader); err != nil { + return fmt.Errorf("error writing generated Prow jobs config to %q: %w", outputFile, err) + } + + return nil + + }); err != nil { + return fmt.Errorf("error walking dir %q: %w", prowJobsConfigInput, err) + } + + return nil +} diff --git a/tools/configgen/pkg/scheduler.go b/tools/configgen/pkg/scheduler.go new file mode 100644 index 00000000000..1d6b366bc14 --- /dev/null +++ b/tools/configgen/pkg/scheduler.go @@ -0,0 +1,126 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import ( + "fmt" + "hash/fnv" + + "istio.io/test-infra/tools/prowgen/pkg/spec" +) + +const ( + // default timeout is 2 hours + defaultTimeout = 120 + + // name of the main branch + mainBranchName = "main" + + // type of periodic Prow job + periodicProwJobType = "periodic" +) + +// addSchedule calculates and adds schedule for periodic Prow jobs to try to +// distribute the workloads evenly to avoid overloading Prow. 
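+//
+// For example, with the generateCron logic below, a "nightly" periodic job
+// gets a daily cron such as "17 9 * * *" (2 AM Pacific expressed in UTC;
+// the minute offset is hashed from the org, repo, branch, and job name so
+// it stays stable across runs), while a "continuous" job on the main branch
+// recurs throughout the day at an interval derived from its timeout.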
+func addSchedule(jobsConfig spec.JobsConfig) spec.JobsConfig { + org := jobsConfig.Org + repo := jobsConfig.Repo + branch := jobsConfig.Branches[0] + for i, job := range jobsConfig.Jobs { + // Only add the calculated cron schedule if both Schedule and Cron are + // empty. + if hasPeriodic(job.Types) && job.Interval == "" && job.Cron == "" { + var timeout int + if job.Timeout != nil { + timeout = int(job.Timeout.Minutes()) + } + if timeout == 0 { + timeout = defaultTimeout + } + job.Cron = generateCron(org, repo, branch, job.Name, timeout) + } + jobsConfig.Jobs[i] = job + } + + return jobsConfig +} + +func hasPeriodic(pjTypes []string) bool { + for _, tp := range pjTypes { + if tp == periodicProwJobType { + return true + } + } + return false +} + +// Generate cron string based on job type, offset generated from jobname +// instead of assign random value to ensure consistency among runs, +// timeout is used for determining how many hours apart +func generateCron(org, repo, branch, jobName string, timeout int) string { + minutesOffset := calculateMinuteOffset(org, repo, branch, jobName) + // Determines hourly job inteval based on timeout + hours := int((timeout+5)/60) + 1 // Allow at least 5 minutes between runs + hourCron := fmt.Sprintf("%d */%d * * *", minutesOffset, hours*3) + daily := func(pacificHour int) string { + return fmt.Sprintf("%d %d * * *", minutesOffset, utcTime(pacificHour)) + } + weekly := func(pacificHour, dayOfWeek int) string { + return fmt.Sprintf("%d %d * * %d", minutesOffset, utcTime(pacificHour), dayOfWeek) + } + + var res string + switch jobName { + case "continuous": + if branch == mainBranchName { + res = hourCron // Multiple times per day + } else { + res = daily(1) // 1 AM + } + case "nightly": + res = daily(2) // 2 AM + case "release": + if branch == mainBranchName { + res = hourCron // Multiple times per day + } else { + res = weekly(2, 2) // Every Tuesday 2 AM + } + default: + if repo == "serving" { + res = hourCron // Multiple times per day for knative/serving periodic Prow jobs + } else { + res = daily(3) // 3 AM + } + } + return res +} + +func utcTime(i int) int { + r := i + 7 + if r > 23 { + return r - 24 + } + return r +} + +func calculateMinuteOffset(str ...string) int { + h := fnv.New32a() + for _, s := range str { + h.Write([]byte(s)) + } + return int(h.Sum32()) % 60 +} diff --git a/tools/configgen/pkg/testgrid.go b/tools/configgen/pkg/testgrid.go new file mode 100644 index 00000000000..e7b714d863a --- /dev/null +++ b/tools/configgen/pkg/testgrid.go @@ -0,0 +1,131 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "knative.dev/test-infra/pkg/testgrid" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/test-infra/prow/config" + "sigs.k8s.io/yaml" +) + +const ( + testgridConfigFileHeader = `# ####################################################################### +# #### #### +# #### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+# #### USE "./hack/generate-configs.sh" TO REGENERATE THIS FILE. ####
+# #### ####
+# #######################################################################
+# Dashboards need to be specified here to be created on TestGrid
+# A prow annotation will be invalid if it references a dashboard that doesn't exist
+`
+)
+
+var (
+	// Names of all the dashboards discovered from the Prow job annotations.
+	dashboardNames = sets.NewString()
+	// Key is the dashboard group name, value is the set of dashboard names.
+	dashboardGroupsMap = map[string]sets.String{}
+)
+
+// GenerateTestGridConfig parses the TestGrid annotations from the generated
+// Prow jobs config under prowJobsConfig, and writes the resulting TestGrid
+// config to testGridConfigOutput.
+func GenerateTestGridConfig(prowJobsConfig, testGridConfigOutput string) error {
+	if err := filepath.WalkDir(prowJobsConfig, func(path string, d os.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		// Skip directories and other unrelated files.
+		if d.IsDir() || !strings.HasSuffix(path, ".yaml") {
+			return nil
+		}
+		log.Printf("Parsing TestGrid annotations for %q", path)
+
+		jobConfig := &config.JobConfig{}
+		bs, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("error reading file %q: %w", path, err)
+		}
+		if err := yaml.Unmarshal(bs, jobConfig); err != nil {
+			return fmt.Errorf("error parsing Prow job config %q: %w", path, err)
+		}
+
+		parseTestGridAnnotations(jobConfig)
+
+		return nil
+	}); err != nil {
+		return fmt.Errorf("error walking dir %q: %w", prowJobsConfig, err)
+	}
+
+	if err := writeTestGridConfig(testGridConfigOutput); err != nil {
+		return fmt.Errorf("error writing generated TestGrid config to %q: %w", testGridConfigOutput, err)
+	}
+
+	return nil
+}
+
+// parseTestGridAnnotations parses the testgrid annotations in the Prow jobs
+// config.
+func parseTestGridAnnotations(jobConfig *config.JobConfig) {
+	for _, periodic := range jobConfig.Periodics {
+		dashboardName := periodic.Annotations[testgridDashboardAnnotation]
+		dashboardTabName := periodic.Annotations[testgridDashboardTabAnnoation]
+		if dashboardName == "" || dashboardTabName == "" {
+			continue
+		}
+		if len(periodic.ExtraRefs) == 0 {
+			continue
+		}
+
+		dashboardNames.Insert(dashboardName)
+
+		org := periodic.ExtraRefs[0].Org
+		branch := periodic.ExtraRefs[0].BaseRef
+		// If the job is for the main branch, add the dashboard to the
+		// dashboard group for better visualization.
+		if branch == "main" {
+			if _, ok := dashboardGroupsMap[org]; !ok {
+				dashboardGroupsMap[org] = sets.NewString()
+			}
+			dashboardGroupsMap[org].Insert(dashboardName)
+		}
+	}
+}
+
+// writeTestGridConfig generates the final TestGrid config and writes it to
+// the config file.
+func writeTestGridConfig(testGridConfigOutput string) error {
+	// Construct the final TestGrid config.
+	dashboards := []testgrid.Dashboard{}
+	for _, name := range dashboardNames.List() {
+		dashboards = append(dashboards, testgrid.Dashboard{Name: name})
+	}
+	dashboardGroups := []testgrid.DashboardGroup{}
+	// Iterate over the sorted group names so the generated file is
+	// deterministic across runs.
+	for _, dg := range sets.StringKeySet(dashboardGroupsMap).List() {
+		dashboardGroup := testgrid.DashboardGroup{Name: dg, DashboardNames: dashboardGroupsMap[dg].List()}
+		dashboardGroups = append(dashboardGroups, dashboardGroup)
+	}
+	cfg := testgrid.Config{Dashboards: dashboards, DashboardGroups: dashboardGroups}
+
+	log.Printf("Writing the generated TestGrid config to %q", testGridConfigOutput)
+	bs, err := yaml.Marshal(cfg)
+	if err != nil {
+		return fmt.Errorf("error marshaling TestGrid config: %w", err)
+	}
+	bs = append([]byte(testgridConfigFileHeader), bs...)
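+	// Write the assembled config; 0o644 keeps the generated file
+	// world-readable but owner-writable.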
+ return ioutil.WriteFile(testGridConfigOutput, bs, 0o644) +} diff --git a/tools/flaky-test-reporter/config/config.go b/tools/flaky-test-reporter/config/config.go index 921fd7b237f..0a9e0a57f78 100644 --- a/tools/flaky-test-reporter/config/config.go +++ b/tools/flaky-test-reporter/config/config.go @@ -24,7 +24,7 @@ import ( "os" "path/filepath" - yaml "gopkg.in/yaml.v2" + "sigs.k8s.io/yaml" ) // configFile saves all information we need, this path is caller based diff --git a/tools/flaky-test-reporter/config/config.yaml b/tools/flaky-test-reporter/config/config.yaml index f8f3ce476bb..fed0fec951d 100644 --- a/tools/flaky-test-reporter/config/config.yaml +++ b/tools/flaky-test-reporter/config/config.yaml @@ -13,7 +13,7 @@ # limitations under the License. jobConfigs: - - name: ci-knative-serving-continuous + - name: continuous_serving_main_periodic org: knative repo: serving type: postsubmit @@ -21,49 +21,49 @@ jobConfigs: slackChannels: - name: serving-api identity: CA4DNJ9A4 - - name: ci-knative-serving-istio-latest-mesh + - name: istio-latest-mesh_serving_main_periodic org: knative repo: serving type: postsubmit slackChannels: - name: net-istio identity: C012AK2FPK7 - - name: ci-knative-serving-istio-latest-no-mesh + - name: istio-latest-no-mesh_serving_main_periodic org: knative repo: serving type: postsubmit slackChannels: - name: net-istio identity: C012AK2FPK7 - - name: ci-knative-serving-contour-latest + - name: contour-latest_serving_main_periodic org: knative repo: serving type: postsubmit slackChannels: - name: net-contour identity: C012J5TCS6Q - - name: ci-knative-serving-s390x-contour-tests + - name: s390x-contour-tests_serving_main_periodic org: knative repo: serving type: postsubmit slackChannels: - name: s390x identity: C027YB4QUUU - - name: ci-knative-serving-kourier-stable + - name: kourier-stable_serving_main org: knative repo: serving type: postsubmit slackChannels: - name: net-kourier identity: C012C0VQJAW - - name: ci-knative-serving-s390x-kourier-tests + - name: s390x-kourier-tests_serving_main_periodic org: knative repo: serving type: postsubmit slackChannels: - name: s390x identity: C027YB4QUUU - - name: ci-knative-eventing-continuous + - name: continuous_eventing_main_periodic org: knative repo: eventing type: postsubmit @@ -71,34 +71,26 @@ jobConfigs: slackChannels: - name: eventing identity: C9JP909F0 - - name: ci-knative-eventing-s390x-e2e-tests + - name: s390x-e2e-tests_eventing_main_periodic org: knative repo: eventing type: postsubmit slackChannels: - name: s390x identity: C027YB4QUUU - - name: ci-knative-sandbox-eventing-kafka-broker-continuous + - name: continuous_eventing-kafka-broker_main_periodic org: knative-sandbox repo: eventing-kafka-broker type: postsubmit issueRepo: eventing-kafka-broker - - name: ci-knative-test-infra-continuous - org: knative - repo: test-infra - type: postsubmit - - name: ci-google-knative-gcp-continuous - org: google - repo: knative-gcp - type: postsubmit - - name: ci-knative-operator-s390x-e2e-tests + - name: s390x-e2e-tests_operator_main_periodic org: knative repo: operator type: postsubmit slackChannels: - name: s390x identity: C027YB4QUUU - - name: ci-knative-client-s390x-e2e-tests + - name: s390x-e2e-tests_client_main_periodic org: knative repo: client type: postsubmit diff --git a/tools/prow-jobs-syncer/README.md b/tools/prow-jobs-syncer/README.md deleted file mode 100644 index 1affbeff7c4..00000000000 --- a/tools/prow-jobs-syncer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Used to update Prow configs when new releases appear 
in Knative repos. It makes a PR with the changes. - -Run automatically by a Prow job. diff --git a/tools/prow-jobs-syncer/config.go b/tools/prow-jobs-syncer/config.go deleted file mode 100644 index 34cd8e0e3b1..00000000000 --- a/tools/prow-jobs-syncer/config.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "knative.dev/test-infra/pkg/ghutil" -) - -const ( - org = "knative" - repo = "test-infra" - // PRHead is branch name where the changes occur - PRHead = "releasebranch" - // PRBase is the branch name where PR targets - PRBase = "main" - - // Paths - repoPath = "src/knative.dev/test-infra" - coreConfigPath = "prow/config.yaml" - jobConfigPath = "prow/jobs/config.yaml" - pluginPath = "prow/plugins.yaml" - testgridConfigPath = "config/prow/testgrid/testgrid.yaml" - templateConfigPath = "prow/config_knative.yaml" - - configGenPath = "tools/config-generator" - - configGenScript = "hack/generate-configs.sh" - - oncallAddress = "https://storage.googleapis.com/knative-infra-oncall/oncall.json" -) - -// GHClientWrapper handles methods for github issues -type GHClientWrapper struct { - ghutil.GithubOperations -} diff --git a/tools/prow-jobs-syncer/main.go b/tools/prow-jobs-syncer/main.go deleted file mode 100644 index ff774b9eb8f..00000000000 --- a/tools/prow-jobs-syncer/main.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// prow-jobs-syncer fetches release branches, -// and creates PRs updating them in knative/test-infra - -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path" - "strings" - - "knative.dev/test-infra/pkg/cmd" - "knative.dev/test-infra/pkg/ghutil" - - "knative.dev/test-infra/pkg/git" -) - -func main() { - githubAccount := flag.String("github-account", "", "Token file for Github authentication") - gitUserID := flag.String("git-userid", "", "The github ID of user for hosting fork, i.e. Github ID of bot") - gitUserName := flag.String("git-username", "", "The username to use on the git commit. Requires --git-email") - gitEmail := flag.String("git-email", "", "The email to use on the git commit. 
Requires --git-username") - label := flag.String("label", "", "The label to add on the PR") - dryrun := flag.Bool("dry-run", false, "dry run switch") - flag.Parse() - - if *dryrun { - log.Println("Running in [dry run mode]") - } - - gopath := os.Getenv("GOPATH") - - configgenArgs := []string{ - "--prow-jobs-config-output", - path.Join(gopath, repoPath, jobConfigPath), - "--testgrid-config-output", - path.Join(gopath, repoPath, testgridConfigPath), - "--upgrade-release-branches", - "--github-token-path", - *githubAccount, - path.Join(gopath, repoPath, templateConfigPath), - } - - configgenFullPath := path.Join(gopath, repoPath, configGenPath) - - log.Print(cmd.RunCommand(fmt.Sprintf("go run %s %s", - configgenFullPath, strings.Join(configgenArgs, " ")))) - - // The code gen above updates the template file, which might not be - // sufficient for generating all prow/testgrid configs, rerun config gen - // script to make everything up-to-date - log.Print(cmd.RunCommand(configGenScript)) - - gc, err := ghutil.NewGithubClient(*githubAccount) - if err != nil { - log.Fatalf("cannot authenticate to github: %v", err) - } - - targetGI := git.Info{ - Org: org, - Repo: repo, - Head: PRHead, - Base: PRBase, - UserID: *gitUserID, - UserName: *gitUserName, - Email: *gitEmail, - } - - gcw := &GHClientWrapper{gc} - if err = createOrUpdatePR(gcw, targetGI, *label, *dryrun); err != nil { - log.Fatalf("failed creating pullrequest: '%v'", err) - } -} diff --git a/tools/prow-jobs-syncer/pullrequest.go b/tools/prow-jobs-syncer/pullrequest.go deleted file mode 100644 index 4b391e6ce28..00000000000 --- a/tools/prow-jobs-syncer/pullrequest.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// pullrequest.go creates git commits and Pull Requests - -package main - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "strings" - - "github.com/google/go-github/v32/github" - "knative.dev/test-infra/pkg/ghutil" - "knative.dev/test-infra/pkg/helpers" - - "knative.dev/test-infra/pkg/git" -) - -func generatePRBody() string { - body := "PR created for syncing release branches changes\n" - oncaller, err := getOncaller() - assignment := "Nobody is currently oncall." 
- if err == nil { - if oncaller != "" { - assignment = fmt.Sprintf("/assign @%s\n/cc @%s\n", oncaller, oncaller) - } - } else { - assignment = fmt.Sprintf("An error occurred while finding an assignee: `%v`.", err) - } - - return body + assignment -} - -func getOncaller() (string, error) { - req, err := http.Get(oncallAddress) - if err != nil { - return "", err - } - defer req.Body.Close() - if req.StatusCode != http.StatusOK { - return "", fmt.Errorf("HTTP error %d (%q) fetching current oncaller", req.StatusCode, req.Status) - } - oncall := struct { - Oncall struct { - ToolsInfra string `json:"tools-infra"` - } `json:"Oncall"` - }{} - if err := json.NewDecoder(req.Body).Decode(&oncall); err != nil { - return "", err - } - return oncall.Oncall.ToolsInfra, nil -} - -// Get existing open PR not merged yet -func getExistingPR(gcw *GHClientWrapper, gi git.Info, matchTitle string) (*github.PullRequest, error) { - var res *github.PullRequest - PRs, err := gcw.ListPullRequests(gi.Org, gi.Repo, gi.GetHeadRef(), gi.Base) - if err == nil { - for _, PR := range PRs { - if string(ghutil.PullRequestOpenState) == *PR.State && strings.Contains(*PR.Title, matchTitle) { - res = PR - break - } - } - } - return res, err -} - -func createOrUpdatePR(gcw *GHClientWrapper, gi git.Info, label string, dryrun bool) error { - const matchTitle = "[Auto] Update prow jobs for release branches" - commitMsg := matchTitle - title := commitMsg - body := generatePRBody() - hasUpdates, err := git.MakeCommit(gi, commitMsg, dryrun) - if err != nil { - return fmt.Errorf("failed git commit: %w", err) - } - if !hasUpdates { - log.Print("There is nothing committed, skip PR") - return nil - } - - var existPR *github.PullRequest - existPR, err = getExistingPR(gcw, gi, matchTitle) - if err != nil { - return fmt.Errorf("failed querying existing pullrequests: %w", err) - } - if existPR != nil { - log.Printf("Found open PR %d", *existPR.Number) - if err := helpers.Run( - fmt.Sprintf("Updating PR %d, title: %q, body: %q", *existPR.Number, title, body), - func() error { - if _, err := gcw.EditPullRequest(gi.Org, gi.Repo, *existPR.Number, title, body); err != nil { - return fmt.Errorf("failed updating pullrequest: %w", err) - } - return nil - }, - dryrun, - ); err != nil { - return err - } - } else { - if err := helpers.Run( - fmt.Sprintf("Creating PR, title: %q, body: %q", title, body), - func() error { - existPR, err = gcw.CreatePullRequest(gi.Org, gi.Repo, gi.GetHeadRef(), gi.Base, title, body) - if err != nil { - return fmt.Errorf("failed creating pullrequest: %w", err) - } - return nil - }, - dryrun, - ); err != nil { - return err - } - } - - if label != "" { - if err := helpers.Run( - fmt.Sprintf("Ensure label %q exists for PR", label), - func() error { - err = gcw.EnsureLabelForPullRequest(gi.Org, gi.Repo, *existPR.Number, label) - if err != nil { - return fmt.Errorf("failed ensuring label %q exists: %w", label, err) - } - return nil - }, - dryrun, - ); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore index ee9694b8780..cc7e53b46c0 100644 --- a/vendor/cloud.google.com/go/.gitignore +++ b/vendor/cloud.google.com/go/.gitignore @@ -2,6 +2,7 @@ .idea .vscode *.swp +.history # Test files *.test diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index b711a8cc1fd..6fc75f1f105 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,393 @@ # Changes + +## 
[0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) + + +### Features + +* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) + + +### Bug Fixes + +* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678)) + +## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23) + + +### ⚠ BREAKING CHANGES + +* **all:** This is a breaking change in dialogflow + +### Features + +* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) +* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) +* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) +* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634) +* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. 
([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61)) +* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf)) +* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57)) +* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f)) +* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee)) + + +### Miscellaneous Chores + +* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) + +## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10) + + +### Features + +* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f)) +* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) +* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) +* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441)) +* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **documentai:** remove the translation fields in document.proto. 
([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25)) +* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e)) +* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548)) +* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650)) +* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4)) +* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) + + +### Bug Fixes + +* **analytics/admin:** add `https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) +* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **functions:** Fix service namespace in grpc_service_config. 
([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495)) +* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80)) +* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef)) +* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c)) +* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e)) + +## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22) + + +### Features + +* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de)) +* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) + + +### Bug Fixes + +* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) + +## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16) + + +### Features + +* **channel:** Add Pub/Sub endpoints for Cloud Channel API. 
([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04)) +* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) +* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2)) +* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418)) +* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f)) +* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac)) + + +### Bug Fixes + +* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Update bazel builds for ER client libraries. 
([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd)) +* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) +* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363)) +* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) + +## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02) + + +### Features + +* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db)) +* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d)) +* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) +* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e)) +* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) +* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) +* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **appengine:** start generating apiv1 
([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7)) +* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3)) +* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) +* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf)) +* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5)) +* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7)) +* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) +* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) +* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102)) +* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149)) +* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b)) +* **internal/gapicgen:** change commit formatting 
to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e)) +* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af)) +* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279)) +* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359)) +* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f)) +* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531)) +* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) +* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8)) +* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60)) +* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) +* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a)) +* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db)) + + +### Bug Fixes + +* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f)) +* **internal/godocfx:** add TOC element for module root package 
([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a)) +* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71)) + +## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448) +* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965)) + + +### Bug Fixes + +* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a)) + +## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374) +* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84)) +* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba)) + + +### Bug Fixes + +* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5)) + +## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) 
[#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199) +* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff)) +* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be)) +* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999)) +* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a)) + + +### Bug Fixes + +* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0)) + +## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119) + + +### Bug Fixes + +* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44)) +* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a)) + + +## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044) +* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3)) +* **internal:** auto-run godocfx on new mods 
([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42)) +* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc)) +* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171)) +* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6)) +* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2)) +* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900)) + + +### Bug Fixes + +* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb)) +* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7)) + +## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025) +* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44)) +* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714)) + +## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14) + +This is an empty release that was created solely to aid in pubsublite's module +carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. 
+ +## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14) + + +### Features + +* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045)) +* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958) +* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33)) +* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf)) +* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b)) +* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e)) +* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85)) + + +### Bug Fixes + +* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0)) + +## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935) + +## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) 
[#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864) +* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568)) +* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72)) +* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830)) +* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e)) +* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61)) + + +### Bug Fixes + +* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d)) +* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7)) +* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883) + +## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781) +* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93)) +* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521)) +* **cloudbuild:** Start generating apiv1/v3 
([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64))
+* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5))
+* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893))
+* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4))
+* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901))
+* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580))
+
+
+### Bug Fixes
+
+* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639))
+* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c))
+* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835)
+
+
+### Reverts
+
+* **cloudbuild:** "feat(cloudbuild): Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557))
+
+## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27)
+
+
+### Announcements
+
+The following changes will be included in an upcoming release and are not
+included in this one.
+
+#### Default Deadlines
+
+By default, non-streaming methods, like Create or Get methods, will have a
+default deadline applied to the context provided at call time, unless a context
+deadline is already set. Streaming methods have no default deadline and will run
+indefinitely, unless the context provided at call time contains a deadline.
+
+To opt out of this behavior, set the environment variable
+`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to
+initializing a client. This opt-out mechanism will be removed in a later
+release, with a notice similar to this one ahead of its removal.
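+
+For illustration only (not part of the release itself), a minimal sketch of
+both mechanisms. The Pub/Sub client stands in for any client library, and
+`my-project` and `my-topic` are placeholder IDs:
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+    "os"
+    "time"
+
+    "cloud.google.com/go/pubsub"
+)
+
+func main() {
+    // Temporary opt-out: must be set before any client is created.
+    os.Setenv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE", "true")
+
+    // Do not put a deadline on the context used for dialing (NewClient).
+    client, err := pubsub.NewClient(context.Background(), "my-project")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer client.Close()
+
+    // A deadline supplied at call time always takes precedence over the
+    // default deadline, whether or not the opt-out is set.
+    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    defer cancel()
+    if _, err := client.Topic("my-topic").Exists(ctx); err != nil {
+        log.Fatal(err)
+    }
+}
+```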
+
+
+### Features
+
+* **all:** auto-regenerate gapics, refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764)
+
+
+### Bug Fixes
+
+* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1))
+* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d))
+
+## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18)
+
+
+### Features
+
+* **all:** auto-regenerate gapics, refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706)
+* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e))
+
+## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05)
+
+
+### Features
+
+* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04))
+* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c))
+* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b))
+* **gaming:** start generating apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601))
+* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32))
+
 ## v0.62.0

### Announcements
@@ -1590,4 +1978,3 @@ Natural Language.
[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
This client uses gRPC as its transport layer, and supports log reading, sinks
and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
-
diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md
index d9775744e5c..6ca285bee22 100644
--- a/vendor/cloud.google.com/go/CONTRIBUTING.md
+++ b/vendor/cloud.google.com/go/CONTRIBUTING.md
@@ -47,7 +47,13 @@ Commits will be squashed when they're merged.
-## Integration Tests
+## Testing
+
+We test code against two versions of Go, the minimum and maximum versions
+supported by our clients. To see which versions these are, check out our
+[README](README.md#supported-versions).
+
+### Integration Tests

In addition to the unit tests, you may run the integration test suite. These
directions describe setting up your environment to run integration tests for
@@ -97,7 +103,8 @@ Next, ensure the following APIs are enabled in the general project:

- Google Compute Engine Instance Group Updater API
- Google Compute Engine Instance Groups API
- Kubernetes Engine API
-- Stackdriver Error Reporting API
+- Cloud Error Reporting API
+- Pub/Sub Lite API

Next, create a Datastore database in the general project, and a Firestore
database in the Firestore project.
@@ -122,10 +129,13 @@ project's service account.
(e.g. doorway-cliff-677) for the Firestore project.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
Firestore project's service account.
+- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
+
+As part of the setup that follows, the following variables will be configured:
+
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
in the form "projects/P/locations/L/keyRings/R". The creation of this is
described below.
-- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.

Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
@@ -144,7 +154,7 @@ $ gcloud auth login
$ gcloud datastore indexes create datastore/testdata/index.yaml

# Creates a Google Cloud storage bucket with the same name as your test project,
-# and with the Stackdriver Logging service account as owner, for the sink
+# and with the Cloud Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
@@ -152,7 +162,7 @@ $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
# Creates a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
-# "service-@gs-project-accounts.iam.gserviceaccount.com"
+# "service-@gs-project-accounts.iam.gserviceaccount.com"
# as a publisher to that topic.

# Creates a Spanner instance for the spanner integration tests.
@@ -171,7 +181,38 @@ $ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --pu
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
-gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
+$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
+```
+
+It may be useful to add exports to your shell initialization for future use.
+For instance, in `.zshrc`:
+
+```sh
+#### START GO SDK Test Variables
+# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
+export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
+
+# The path to the JSON key file of the general project's service account.
+export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json + +# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. +export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project + +# The path to the JSON key file of the Firestore project's service account. +export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json + +# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R". +# The creation of this is described below. +export MY_KEYRING=my-golang-sdk-test +export MY_LOCATION=global +export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING + +# API key for using the Translate API. +export GCLOUD_TESTS_API_KEY=abcdefghijk123456789 + +# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones) +export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region +#### END GO SDK Test Variables ``` #### Running @@ -180,7 +221,15 @@ Once you've done the necessary setup, you can run the integration tests by running: ``` sh -$ go test -v cloud.google.com/go/... +$ go test -v ./... +``` + +Note that the above command will not run the tests in other modules. To run +tests on other modules, first navigate to the appropriate +subdirectory. For instance, to run only the tests for datastore: +``` sh +$ cd datastore +$ go test -v ./... ``` #### Replay diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index b115812c269..ba024f5abea 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -1,6 +1,6 @@ # Google Cloud Client Libraries for Go -[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go) +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go) Go packages for [Google Cloud Platform](https://cloud.google.com) services. @@ -25,52 +25,51 @@ To install the packages on your system, *do not clone the repo*. Instead: **NOTE:** Some of these packages are under development, and may occasionally make backwards-incompatible changes. -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). 
- ## Supported APIs -Google API | Status | Package -------------------------------------------------|--------------|----------------------------------------------------------- -[Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta) -[Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1) -[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) -[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) -[Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) -[Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) -[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) -[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) -[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) -[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore) -[Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2) -[Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2) -[Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2) -[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting) -[Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore) -[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam) -[IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1) -[IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2) -[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1) -[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1) -[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging) -[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1) -[Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) -[OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) -[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) -[Phishing Protection][cloud-phishingprotection] | alpha | 
[`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) -[reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) -[Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) -[Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1) -[Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1) -[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner) -[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1) -[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage) -[Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1) -[Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1) -[Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2) -[Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate) -[Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2) -[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1) -[Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1) +| Google API | Status | Package | +| ----------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- | +| [Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta) | +| [Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1) | +| [BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) | +| [Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) | +| [Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) | +| [Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) | +| [Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) | +| [ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) | +| [Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) | +| 
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore) | +| [Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2) | +| [Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2) | +| [Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2) | +| [ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting) | +| [Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore) | +| [IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam) | +| [IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1) | +| [IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2) | +| [KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1) | +| [Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1) | +| [Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging) | +| [Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1) | +| [Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) | +| [OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) | +| [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) | +| [Pub/Sub Lite][cloud-pubsublite] | beta | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) | +| [Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) | +| [reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) | +| [Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) | +| [Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1) | +| [Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1) | +| [Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner) | +| [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1) | +| [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage) | +| [Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1) | 
+| [Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1) | +| [Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2) | +| [Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate) | +| [Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2) | +| [Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1) | +| [Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1) | > **Alpha status**: the API is still being actively developed. As a > result, it might change in backward-incompatible ways and is not recommended @@ -85,10 +84,9 @@ Google API | Status | Package Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go) -## Go Versions Supported +## [Go Versions Supported](#supported-versions) -We support the two most recent major versions of Go. If Google App Engine uses -an older version, we support that as well. +We currently support Go versions 1.11 and newer. ## Authorization @@ -153,6 +151,7 @@ for more information. [cloud-irm]: https://cloud.google.com/incident-response/docs/concepts [cloud-kms]: https://cloud.google.com/kms/ [cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-pubsublite]: https://cloud.google.com/pubsub/lite [cloud-storage]: https://cloud.google.com/storage/ [cloud-language]: https://cloud.google.com/natural-language [cloud-logging]: https://cloud.google.com/logging/ @@ -176,3 +175,11 @@ for more information. [cloud-video]: https://cloud.google.com/video-intelligence/ [cloud-vision]: https://cloud.google.com/vision [cloud-webrisk]: https://cloud.google.com/web-risk/ + +## Links + +- [Go on Google Cloud](https://cloud.google.com/go/home) +- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started) +- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart) +- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go) +- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go) diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md index c8c7f933527..d04176097dc 100644 --- a/vendor/cloud.google.com/go/RELEASING.md +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -1,25 +1,6 @@ -# Setup from scratch +# Releasing -1. [Install Go](https://golang.org/dl/). - 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) - is in your `PATH`. - 1. Check it's working by running `go version`. - * If it doesn't work, check the install location, usually - `/usr/local/go`, is on your `PATH`. - -1. Sign one of the -[contributor license agreements](#contributor-license-agreements) below. - -1. Clone the repo: - `git clone https://github.com/googleapis/google-cloud-go` - -1. Change into the checked out source: - `cd google-cloud-go` - -1. Fork the repo and add your fork as a secondary remote (this is necessary in - order to create PRs). - -# Which module to release? +## Determine which module to release The Go client libraries have several modules. 
Each module does not strictly correspond to a single library - they correspond to trees of directories. If a @@ -27,17 +8,22 @@ file needs to be released, you must release the closest ancestor module. To see all modules: -``` +```bash $ cat `find . -name go.mod` | grep module +module cloud.google.com/go/pubsub +module cloud.google.com/go/spanner module cloud.google.com/go module cloud.google.com/go/bigtable -module cloud.google.com/go/firestore module cloud.google.com/go/bigquery module cloud.google.com/go/storage -module cloud.google.com/go/datastore -module cloud.google.com/go/pubsub -module cloud.google.com/go/spanner +module cloud.google.com/go/pubsublite +module cloud.google.com/go/firestore module cloud.google.com/go/logging +module cloud.google.com/go/internal/gapicgen +module cloud.google.com/go/internal/godocfx +module cloud.google.com/go/internal/examples/fake +module cloud.google.com/go/internal/examples/mock +module cloud.google.com/go/datastore ``` The `cloud.google.com/go` is the repository root module. Each other module is @@ -53,18 +39,47 @@ of the `cloud.google.com/go` repository root module. Note: releasing `cloud.google.com/go` has no impact on any of the submodules, and vice-versa. They are released entirely independently. -# Test failures +## Test failures If there are any test failures in the Kokoro build, releases are blocked until the failures have been resolved. -# How to release `cloud.google.com/go` +## How to release +### Automated Releases (`cloud.google.com/go` and submodules) + +We now use [release-please](https://github.com/googleapis/release-please) to +perform automated releases for `cloud.google.com/go` and all submodules. + +1. If there are changes that have not yet been released, a + [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will + be automatically opened by release-please + with a title like "chore: release X.Y.Z" (for the root module) or + "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z + is the next version to be released. Find the desired pull request + [here](https://github.com/googleapis/google-cloud-go/pulls) 1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any - failures in the most recent build, address them before proceeding with the - release. -1. Navigate to `~/code/gocloud/` and switch to master. + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. (This applies even if the failures are in a different submodule + from the one being released.) +1. Review the release notes. These are automatically generated from the titles + of any merged commits since the previous release. If you would like to edit + them, this can be done by updating the changes in the release PR. +1. To cut a release, approve and merge the pull request. Doing so will + update the `CHANGES.md`, tag the merged commit with the appropriate version, + and draft a GitHub release which will copy the notes from `CHANGES.md`. + +### Manual Release (`cloud.google.com/go`) + +If for whatever reason the automated release process is not working as expected, +here is how to manually cut a release of `cloud.google.com/go`. + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. +1. 
Navigate to `google-cloud-go/` and switch to master. 1. `git pull` 1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. The current latest tag `$CV` is the largest tag. It should look something @@ -76,8 +91,11 @@ the failures have been resolved. (the `git log` is going to show you things in submodules, which are not going to be part of your release). 1. Edit `CHANGES.md` to include a summary of the changes. -1. `cd internal/version && go generate && cd -` -1. Commit the changes, push to your fork, and create a PR. +1. In `internal/version/version.go`, update `const Repo` to today's date with + the format `YYYYMMDD`. +1. In `internal/version` run `go generate`. +1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, + and create a PR titled `chore: release $NV`. 1. Wait for the PR to be reviewed and merged. Once it's merged, and without merging any other PRs in the meantime: a. Switch to master. @@ -85,24 +103,22 @@ the failures have been resolved. c. Tag the repo with the next version: `git tag $NV`. d. Push the tag to origin: `git push origin $NV` -2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) +1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) with the new release, copying the contents of `CHANGES.md`. -# How to release a submodule - -We have several submodules, including `cloud.google.com/go/logging`, -`cloud.google.com/go/datastore`, and so on. +### Manual Releases (submodules) -To release a submodule: +If for whatever reason the automated release process is not working as expected, +here is how to manually cut a release of a submodule. (these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) 1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any - failures in the most recent build, address them before proceeding with the - release. (This applies even if the failures are in a different submodule from the one - being released.) -1. Navigate to `~/code/gocloud/` and switch to master. + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. (This applies even if the failures are in a different submodule + from the one being released.) +1. Navigate to `google-cloud-go/` and switch to master. 1. `git pull` 1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all existing releases. The current latest tag `$CV` is the largest tag. It @@ -111,8 +127,9 @@ To release a submodule: 1. On master, run `git log $CV.. -- datastore/` to list all the changes to the submodule directory since the last release. 1. Edit `datastore/CHANGES.md` to include a summary of the changes. -1. `cd internal/version && go generate && cd -` -1. Commit the changes, push to your fork, and create a PR. +1. In `internal/version` run `go generate`. +1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, + and create a PR titled `chore(datastore): release $NV`. 1. Wait for the PR to be reviewed and merged. Once it's merged, and without merging any other PRs in the meantime: a. Switch to master. @@ -122,7 +139,3 @@ To release a submodule: `git push origin $NV` 1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) with the new release, copying the contents of `datastore/CHANGES.md`. 
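+
+For quick reference, the manual submodule steps above condense to the shell
+sketch below. The version tags shown are hypothetical stand-ins for `$CV` and
+`$NV`; substitute the real current and next versions:
+
+```bash
+# Identify the current release; the largest tag is $CV.
+git tag -l | grep datastore | grep -v beta | grep -v alpha
+CV=datastore/v1.3.0   # hypothetical current version
+NV=datastore/v1.4.0   # hypothetical next version
+
+# Review everything that changed in the submodule since the last release.
+git log $CV.. -- datastore/
+
+# After the release PR has merged, and without merging any other PRs:
+git checkout master
+git pull
+git tag $NV
+git push origin $NV
+```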
-
-# Appendix
-
-1: This should get better as submodule tooling matures.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 6b13424fd97..545bd9d379c 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -296,6 +296,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
		// being stable anyway.
		host = metadataIP
	}
+	suffix = strings.TrimLeft(suffix, "/")
	u := "http://" + host + "/computeMetadata/v1/" + suffix
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
index 237d84561ce..b667cc8b254 100644
--- a/vendor/cloud.google.com/go/doc.go
+++ b/vendor/cloud.google.com/go/doc.go
@@ -34,9 +34,18 @@ in this package for details.

Timeouts and Cancellation

-By default, all requests in sub-packages will run indefinitely, retrying on transient
-errors when correctness allows. To set timeouts or arrange for cancellation, use
-contexts. See the examples for details.
+By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
+context provided at call time, unless a context deadline is already set. Streaming
+methods have no default deadline and will run indefinitely. To set timeouts or
+arrange for cancellation, use contexts. See the examples for details. Transient
+errors will be retried when correctness allows.
+
+To opt out of default deadlines, set the temporary environment variable
+GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
+creation. This affects all Google Cloud Go client libraries. This opt-out
+mechanism will be removed in a future release. File an issue at
+https://github.com/googleapis/google-cloud-go if the default deadlines
+cannot work for you.

Do not attempt to control the initial connection (dialing) of a service by setting a
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
@@ -76,6 +85,20 @@ https://godoc.org/google.golang.org/grpc/grpclog for more information.

For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or
"http2debug=2".

+Inspecting errors
+
+Most of the errors returned by the generated clients can be converted into a
+`grpc.Status`. Converting your errors to this type can be a useful way to get
+more information about what went wrong while debugging.
+	if err != nil {
+		if s, ok := status.FromError(err); ok {
+			log.Println(s.Message())
+			for _, d := range s.Proto().Details {
+				log.Println(d)
+			}
+		}
+	}
+
Client Stability

Clients in this repository are considered alpha or beta unless otherwise
diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod
index 9f97d93a69c..4fa03cae5d2 100644
--- a/vendor/cloud.google.com/go/go.mod
+++ b/vendor/cloud.google.com/go/go.mod
@@ -4,21 +4,20 @@ go 1.11

require (
	cloud.google.com/go/storage v1.10.0
-	github.com/golang/mock v1.4.3
-	github.com/golang/protobuf v1.4.2
-	github.com/google/go-cmp v0.5.1
-	github.com/google/martian/v3 v3.0.0
-	github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99
+	github.com/golang/mock v1.5.0
+	github.com/golang/protobuf v1.5.1
+	github.com/google/go-cmp v0.5.5
+	github.com/google/martian/v3 v3.1.0
+	github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5
	github.com/googleapis/gax-go/v2 v2.0.5
	github.com/jstemmer/go-junit-report v0.9.1
-	go.opencensus.io v0.22.4
-	golang.org/x/lint v0.0.0-20200302205851-738671d3881b
-	golang.org/x/net v0.0.0-20200707034311-ab3426394381
-	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	golang.org/x/text v0.3.3
-	golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7
-	google.golang.org/api v0.29.0
-	google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f
-	google.golang.org/grpc v1.30.0
-	google.golang.org/protobuf v1.25.0 // indirect
+	go.opencensus.io v0.23.0
+	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5
+	golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4
+	golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84
+	golang.org/x/text v0.3.5
+	golang.org/x/tools v0.1.0
+	google.golang.org/api v0.43.0
+	google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1
+	google.golang.org/grpc v1.36.1
)
diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum
index ae83ec26d2c..d0209b286d1 100644
--- a/vendor/cloud.google.com/go/go.sum
+++ b/vendor/cloud.google.com/go/go.sum
@@ -11,79 +11,68 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0 h1:86K1Gel7BQ9/WmNWn7dTKMvTLFzwtBe5FNqYbi9X35g= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= @@ -91,139 +80,128 @@ github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 
v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= 
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee 
h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -234,47 +212,52 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= 
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 h1:duBc5zuJsmJXYOVVE/6PxejI+N3AaCqKjtsoLn1Je5Q= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= 
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -284,30 +267,36 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25 h1:OKbAoGs4fGM5cPLlVQLZGYkFC8OnOfgo6tt0Smf9XhM= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -315,18 +304,14 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -347,63 +332,62 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7 h1:LHW24ah7B+uV/OePwNP0p/t889F3QSyLvY8Sg/bK0SY= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= 
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -418,69 +402,69 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIoU2uUvrMVl+H26359loFFUleSMXFo= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f h1:ohwtWcCwB/fZUxh/vjazHorYmBnua3NmY3CAjwC7mEA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
-google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index baef050bb7f..ecb5f8efda6 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1,19 +1,67 @@ { - "cloud.google.com/go/asset/apiv1": { - "distribution_name": "cloud.google.com/go/asset/apiv1", - "description": "Cloud Asset API", + "cloud.google.com/go/accessapproval/apiv1": { + "distribution_name": "cloud.google.com/go/accessapproval/apiv1", + "description": "Access Approval API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/accessapproval/apiv1", "release_level": "ga" }, - "cloud.google.com/go/asset/apiv1beta1": { - "distribution_name": "cloud.google.com/go/asset/apiv1beta1", + "cloud.google.com/go/analytics/admin/apiv1alpha": { + "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", + "description": "Google Analytics Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/admin/apiv1alpha", + "release_level": "alpha" + }, + "cloud.google.com/go/analytics/data/apiv1alpha": { + "distribution_name": "cloud.google.com/go/analytics/data/apiv1alpha", + "description": "Google Analytics Data API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/data/apiv1alpha", + "release_level": 
"alpha" + }, + "cloud.google.com/go/apigateway/apiv1": { + "distribution_name": "cloud.google.com/go/apigateway/apiv1", + "description": "API Gateway API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/apigateway/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/appengine/apiv1": { + "distribution_name": "cloud.google.com/go/appengine/apiv1", + "description": "App Engine Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/appengine/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/area120/tables/apiv1alpha1": { + "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", + "description": "Area120 Tables API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/area120/tables/apiv1alpha1", + "release_level": "alpha" + }, + "cloud.google.com/go/artifactregistry/apiv1beta2": { + "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", + "description": "Artifact Registry API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/artifactregistry/apiv1beta2", + "release_level": "ga" + }, + "cloud.google.com/go/asset/apiv1": { + "distribution_name": "cloud.google.com/go/asset/apiv1", "description": "Cloud Asset API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1beta1", - "release_level": "beta" + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1", + "release_level": "ga" }, "cloud.google.com/go/asset/apiv1p2beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", @@ -31,6 +79,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1", "release_level": "beta" }, + "cloud.google.com/go/assuredworkloads/apiv1beta1": { + "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", + "description": "Assured Workloads API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/assuredworkloads/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/automl/apiv1": { "distribution_name": "cloud.google.com/go/automl/apiv1", "description": "Cloud AutoML API", @@ -143,14 +199,38 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/apiv1", "release_level": "ga" }, + "cloud.google.com/go/billing/budgets/apiv1": { + "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", + "description": "Cloud Billing Budget API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1", + "release_level": "ga" + }, "cloud.google.com/go/billing/budgets/apiv1beta1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", - "description": "", + "description": "Cloud Billing Budget API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/binaryauthorization/apiv1beta1": { + "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", + "description": "Binary Authorization API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/binaryauthorization/apiv1beta1", + "release_level": "beta" + }, + 
"cloud.google.com/go/channel/apiv1": { + "distribution_name": "cloud.google.com/go/channel/apiv1", + "description": "Cloud Channel API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/channel/apiv1", + "release_level": "ga" + }, "cloud.google.com/go/cloudbuild/apiv1/v2": { "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", "description": "Cloud Build API", @@ -215,6 +295,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/datalabeling/apiv1beta1": { + "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", + "description": "Data Labeling API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datalabeling/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/dataproc/apiv1": { "distribution_name": "cloud.google.com/go/dataproc/apiv1", "description": "Cloud Dataproc API", @@ -231,6 +319,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1beta2", "release_level": "beta" }, + "cloud.google.com/go/dataqna/apiv1alpha": { + "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", + "description": "Data QnA API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataqna/apiv1alpha", + "release_level": "alpha" + }, "cloud.google.com/go/datastore": { "distribution_name": "cloud.google.com/go/datastore", "description": "Cloud Datastore", @@ -263,6 +359,22 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2", "release_level": "ga" }, + "cloud.google.com/go/dialogflow/cx/apiv3": { + "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3", + "release_level": "beta" + }, + "cloud.google.com/go/dialogflow/cx/apiv3beta1": { + "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3beta1", + "release_level": "beta" + }, "cloud.google.com/go/dlp/apiv2": { "distribution_name": "cloud.google.com/go/dlp/apiv2", "description": "Cloud Data Loss Prevention (DLP) API", @@ -271,9 +383,33 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2", "release_level": "ga" }, + "cloud.google.com/go/documentai/apiv1": { + "distribution_name": "cloud.google.com/go/documentai/apiv1", + "description": "Cloud Document AI API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/documentai/apiv1beta3": { + "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", + "description": "Cloud Document AI API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1beta3", + "release_level": "beta" + }, + "cloud.google.com/go/domains/apiv1beta1": { + "distribution_name": "cloud.google.com/go/domains/apiv1beta1", + "description": "Cloud Domains API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/domains/apiv1beta1", + "release_level": "beta" + }, 
"cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", - "description": "Stackdriver Error Reporting API", + "description": "Cloud Error Reporting API", "language": "Go", "client_library_type": "manual", "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting", @@ -281,7 +417,7 @@ }, "cloud.google.com/go/errorreporting/apiv1beta1": { "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", - "description": "Stackdriver Error Reporting API", + "description": "Error Reporting API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1", @@ -313,20 +449,36 @@ }, "cloud.google.com/go/functions/apiv1": { "distribution_name": "cloud.google.com/go/functions/apiv1", - "description": "", + "description": "Cloud Functions API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/functions/apiv1", - "release_level": "beta" + "release_level": "ga" + }, + "cloud.google.com/go/gaming/apiv1": { + "distribution_name": "cloud.google.com/go/gaming/apiv1", + "description": "Game Services API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1", + "release_level": "ga" }, "cloud.google.com/go/gaming/apiv1beta": { "distribution_name": "cloud.google.com/go/gaming/apiv1beta", - "description": "", + "description": "Game Services API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta", "release_level": "beta" }, + "cloud.google.com/go/gkehub/apiv1beta1": { + "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", + "description": "GKE Hub", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/gkehub/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/iam": { "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", @@ -377,7 +529,7 @@ }, "cloud.google.com/go/logging": { "distribution_name": "cloud.google.com/go/logging", - "description": "Stackdriver Logging API", + "description": "Cloud Logging API", "language": "Go", "client_library_type": "manual", "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging", @@ -399,6 +551,30 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/longrunning/autogen", "release_level": "alpha" }, + "cloud.google.com/go/managedidentities/apiv1": { + "distribution_name": "cloud.google.com/go/managedidentities/apiv1", + "description": "Managed Service for Microsoft Active Directory API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/managedidentities/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/mediatranslation/apiv1beta1": { + "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", + "description": "Media Translation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/mediatranslation/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/memcache/apiv1": { + "distribution_name": "cloud.google.com/go/memcache/apiv1", + "description": "Cloud Memorystore for Memcached API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1", + "release_level": "ga" + }, "cloud.google.com/go/memcache/apiv1beta2": { 
"distribution_name": "cloud.google.com/go/memcache/apiv1beta2", "description": "Cloud Memorystore for Memcached API", @@ -407,6 +583,22 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1beta2", "release_level": "beta" }, + "cloud.google.com/go/metastore/apiv1alpha": { + "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", + "description": "Dataproc Metastore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/metastore/apiv1alpha", + "release_level": "alpha" + }, + "cloud.google.com/go/metastore/apiv1beta": { + "distribution_name": "cloud.google.com/go/metastore/apiv1beta", + "description": "Dataproc Metastore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/metastore/apiv1beta", + "release_level": "beta" + }, "cloud.google.com/go/monitoring/apiv3/v2": { "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", "description": "Cloud Monitoring API", @@ -417,11 +609,19 @@ }, "cloud.google.com/go/monitoring/dashboard/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", - "description": "", + "description": "Cloud Monitoring API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", - "release_level": "beta" + "release_level": "ga" + }, + "cloud.google.com/go/networkconnectivity/apiv1alpha1": { + "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", + "description": "Network Connectivity API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/networkconnectivity/apiv1alpha1", + "release_level": "alpha" }, "cloud.google.com/go/notebooks/apiv1beta1": { "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", @@ -431,9 +631,17 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/notebooks/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/orgpolicy/apiv2": { + "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", + "description": "Organization Policy API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/orgpolicy/apiv2", + "release_level": "ga" + }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", - "description": "Cloud OS Config API", + "description": "OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1", @@ -449,7 +657,7 @@ }, "cloud.google.com/go/osconfig/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/apiv1", - "description": "Cloud OS Config API", + "description": "OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1", @@ -493,7 +701,15 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/policytroubleshooter/apiv1", - "release_level": "beta" + "release_level": "ga" + }, + "cloud.google.com/go/profiler": { + "distribution_name": "cloud.google.com/go/profiler", + "description": "Cloud Profiler", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/profiler", + "release_level": "ga" }, "cloud.google.com/go/pubsub": { "distribution_name": 
"cloud.google.com/go/pubsub", @@ -511,13 +727,21 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub/apiv1", "release_level": "ga" }, + "cloud.google.com/go/pubsublite/apiv1": { + "distribution_name": "cloud.google.com/go/pubsublite/apiv1", + "description": "Pub/Sub Lite API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsublite/apiv1", + "release_level": "beta" + }, "cloud.google.com/go/recaptchaenterprise/apiv1": { "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", @@ -527,6 +751,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/recommendationengine/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", + "description": "Recommendations AI", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommendationengine/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/recommender/apiv1": { "distribution_name": "cloud.google.com/go/recommender/apiv1", "description": "Recommender API", @@ -559,6 +791,30 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/resourcemanager/apiv2": { + "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", + "description": "Cloud Resource Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcemanager/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/resourcesettings/apiv1": { + "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", + "description": "Resource Settings API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcesettings/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/retail/apiv2": { + "distribution_name": "cloud.google.com/go/retail/apiv2", + "description": "Retail API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/retail/apiv2", + "release_level": "ga" + }, "cloud.google.com/go/rpcreplay": { "distribution_name": "cloud.google.com/go/rpcreplay", "description": "RPC Replay", @@ -599,6 +855,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/security/privateca/apiv1beta1": { + "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1", + "description": "Certificate Authority API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/security/privateca/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", "description": "Security Command Center API", @@ -631,6 +895,22 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/settings/apiv1beta1", "release_level": "beta" }, + 
"cloud.google.com/go/servicecontrol/apiv1": { + "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", + "description": "Service Control API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicecontrol/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/servicedirectory/apiv1": { + "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", + "description": "Service Directory API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1", + "release_level": "ga" + }, "cloud.google.com/go/servicedirectory/apiv1beta1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", "description": "Service Directory API", @@ -639,6 +919,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/servicemanagement/apiv1": { + "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", + "description": "Service Management API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicemanagement/apiv1", + "release_level": "ga" + }, "cloud.google.com/go/spanner": { "distribution_name": "cloud.google.com/go/spanner", "description": "Cloud Spanner", @@ -695,6 +983,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/storage", "release_level": "ga" }, + "cloud.google.com/go/talent/apiv4": { + "distribution_name": "cloud.google.com/go/talent/apiv4", + "description": "Cloud Talent Solution API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4", + "release_level": "beta" + }, "cloud.google.com/go/talent/apiv4beta1": { "distribution_name": "cloud.google.com/go/talent/apiv4beta1", "description": "Cloud Talent Solution API", @@ -711,14 +1007,6 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1", "release_level": "ga" }, - "cloud.google.com/go/trace": { - "distribution_name": "cloud.google.com/go/trace", - "description": "Stackdriver Trace", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace", - "release_level": "ga" - }, "cloud.google.com/go/trace/apiv1": { "distribution_name": "cloud.google.com/go/trace/apiv1", "description": "Stackdriver Trace API", @@ -743,6 +1031,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/translate/apiv3", "release_level": "ga" }, + "cloud.google.com/go/video/transcoder/apiv1beta1": { + "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", + "description": "Transcoder API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/video/transcoder/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/videointelligence/apiv1": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1", "description": "Cloud Video Intelligence API", @@ -790,5 +1086,29 @@ "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1", "release_level": "beta" + }, + "cloud.google.com/go/websecurityscanner/apiv1": { + "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", + "description": "Web Security Scanner API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/websecurityscanner/apiv1", 
+ "release_level": "ga" + }, + "cloud.google.com/go/workflows/apiv1beta": { + "distribution_name": "cloud.google.com/go/workflows/apiv1beta", + "description": "Workflows API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/workflows/executions/apiv1beta": { + "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", + "description": "Workflow Executions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/executions/apiv1beta", + "release_level": "beta" } } diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index ebeb65d3a43..fd9dd91e985 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20200727" +const Repo = "20201104" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index f6d57be5085..17362780fff 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,5 +1,16 @@ # Changes +## v1.12.0 +- V4 signed URL fixes: + - Fix encoding of spaces in query parameters. + - Add fields that were missing from PostPolicyV4 policy conditions. +- Fix Query to correctly list prefixes as well as objects when SetAttrSelection + is used. + +## v1.11.0 +- Add support for CustomTime and NoncurrentTime object lifecycle management + features. + ## v1.10.0 - Bump dependency on google.golang.org/api to capture changes to retry logic which will make retries on writes more resilient. diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 478482645fa..19221168f90 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -389,7 +389,8 @@ type RetentionPolicy struct { } const ( - // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. + // RFC3339 timestamp with only the date segment, used for CreatedBefore, + // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. rfc3339Date = "2006-01-02" // DeleteAction is a lifecycle action that deletes a live and/or archived @@ -455,6 +456,21 @@ type LifecycleCondition struct { // the specified date in UTC. CreatedBefore time.Time + // CustomTimeBefore is the CustomTime metadata field of the object. This + // condition is satisfied when an object's CustomTime timestamp is before + // midnight of the specified date in UTC. + // + // This condition can only be satisfied if CustomTime has been set. + CustomTimeBefore time.Time + + // DaysSinceCustomTime is the days elapsed since the CustomTime date of the + // object. This condition can only be satisfied if CustomTime has been set. + DaysSinceCustomTime int64 + + // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp + // of the object. This condition is relevant only for versioned objects. + DaysSinceNoncurrentTime int64 + // Liveness specifies the object's liveness. 
Relevant only for versioned objects Liveness Liveness @@ -464,6 +480,13 @@ type LifecycleCondition struct { // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". MatchesStorageClasses []string + // NoncurrentTimeBefore is the noncurrent timestamp of the object. This + // condition is satisfied when an object's noncurrent timestamp is before + // midnight of the specified date in UTC. + // + // This condition is relevant only for versioned objects. + NoncurrentTimeBefore time.Time + // NumNewerVersions is the condition matching objects with a number of newer versions. // // If the value is N, this condition is satisfied when there are at least N @@ -946,9 +969,11 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { StorageClass: r.Action.StorageClass, }, Condition: &raw.BucketLifecycleRuleCondition{ - Age: r.Condition.AgeInDays, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - NumNewerVersions: r.Condition.NumNewerVersions, + Age: r.Condition.AgeInDays, + DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + NumNewerVersions: r.Condition.NumNewerVersions, }, } @@ -964,6 +989,12 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { if !r.Condition.CreatedBefore.IsZero() { rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) + } rl.Rule = append(rl.Rule, rr) } return &rl @@ -981,9 +1012,11 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { StorageClass: rr.Action.StorageClass, }, Condition: LifecycleCondition{ - AgeInDays: rr.Condition.Age, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - NumNewerVersions: rr.Condition.NumNewerVersions, + AgeInDays: rr.Condition.Age, + DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + NumNewerVersions: rr.Condition.NumNewerVersions, }, } @@ -998,6 +1031,12 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { if rr.Condition.CreatedBefore != "" { r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) } + if rr.Condition.CustomTimeBefore != "" { + r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) + } + if rr.Condition.NoncurrentTimeBefore != "" { + r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) + } l.Rules = append(l.Rules, r) } return l @@ -1091,8 +1130,9 @@ func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLeve } } -// Objects returns an iterator over the objects in the bucket that match the Query q. -// If q is nil, no filtering is done. +// Objects returns an iterator over the objects in the bucket that match the +// Query q. If q is nil, no filtering is done. Objects will be iterated over +// lexicographically by name. // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. 
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { @@ -1131,6 +1171,13 @@ func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // +// In addition, if Next returns an error other than iterator.Done, all +// subsequent calls will return the same error. To continue iteration, a new +// `ObjectIterator` must be created. Since objects are ordered lexicographically +// by name, `Query.StartOffset` can be used to create a new iterator which will +// start at the desired place. See +// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. +// // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. @@ -1151,6 +1198,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) req.Projection("full") req.Delimiter(it.query.Delimiter) req.Prefix(it.query.Prefix) + req.StartOffset(it.query.StartOffset) + req.EndOffset(it.query.EndOffset) req.Versions(it.query.Versions) if len(it.query.fieldSelection) > 0 { req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 614ea11a590..750e183496a 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -39,7 +39,9 @@ To start working with this package, create a client: // TODO: Handle error. } -The client will use your default application credentials. +The client will use your default application credentials. Clients should be +reused instead of created as needed. The methods of Client are safe for +concurrent use by multiple goroutines. If you only wish to access public data, you can create an unauthenticated client with @@ -136,6 +138,17 @@ Listing objects in a bucket is done with the Bucket.Objects method: names = append(names, attrs.Name) } +Objects are listed lexicographically by name. To filter objects +lexicographically, Query.StartOffset and/or Query.EndOffset can be used: + + query := &storage.Query{ + Prefix: "", + StartOffset: "bar/", // Only list objects lexicographically >= "bar/" + EndOffset: "foo/", // Only list objects lexicographically < "foo/" + } + + // ... 
as before + If only a subset of object attributes is needed when listing, specifying this subset using Query.SetAttrSelection may speed up the listing process: diff --git a/vendor/cloud.google.com/go/storage/go.mod b/vendor/cloud.google.com/go/storage/go.mod index 2eb6df3cbde..c45b83604e7 100644 --- a/vendor/cloud.google.com/go/storage/go.mod +++ b/vendor/cloud.google.com/go/storage/go.mod @@ -3,16 +3,13 @@ module cloud.google.com/go/storage go 1.11 require ( - cloud.google.com/go v0.57.0 - cloud.google.com/go/bigquery v1.8.0 // indirect + cloud.google.com/go v0.66.0 github.com/golang/protobuf v1.4.2 - github.com/google/go-cmp v0.4.1 + github.com/google/go-cmp v0.5.2 github.com/googleapis/gax-go/v2 v2.0.5 - golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect - golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 // indirect - google.golang.org/api v0.28.0 - google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 - google.golang.org/grpc v1.29.1 + golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 + golang.org/x/tools v0.0.0-20200918232735-d647fc253266 // indirect + google.golang.org/api v0.32.0 + google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5 + google.golang.org/grpc v1.32.0 ) diff --git a/vendor/cloud.google.com/go/storage/go.sum b/vendor/cloud.google.com/go/storage/go.sum index 5d3fca5f832..2033319903a 100644 --- a/vendor/cloud.google.com/go/storage/go.sum +++ b/vendor/cloud.google.com/go/storage/go.sum @@ -17,6 +17,12 @@ cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0 h1:RmDygqvj27Zf3fCQjQRtLyC7KwFcHkeJitcO0OoGOcA= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.66.0 h1:DZeAkuQGQqnm9Xv36SbMJEU8aFBz4wL04UpMWPWwjzg= +cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= @@ -27,7 +33,6 @@ cloud.google.com/go/bigquery v1.5.0 h1:K2NyuHRuv15ku6eUpe0DQk5ZykPMnSOnvuVf6IHcj cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -45,6 +50,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -76,6 +82,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -104,16 +111,25 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -137,6 +153,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -144,10 +162,13 @@ go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -201,6 +222,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= @@ -220,6 +242,14 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -227,6 +257,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -237,6 +269,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -272,11 +305,18 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -326,12 +366,22 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 h1:FD4wDsP+CQUqh2V12OBOt90pLHVToe58P++fUu3ggV4= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj8tIM0KixlUUnBtYXsmd8= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20200918232735-d647fc253266 h1:k7tVuG0g1JwmD3Jh8oAl1vQ1C3jb4Hi/dUl1wWDBJpQ= +golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -354,6 +404,13 @@ google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= +google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -399,8 +456,16 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 h1:FGjyjrQGURdc98leD1P65IdQD9Zlr4McvRcqIlV6OSs= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5 h1:B9nroC8SSX5GtbVvxPF9tYIVkaCpjhVLOrlAY8ONzm8= +google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= @@ -417,20 +482,25 @@ google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -439,11 +509,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= 
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index b9df7db9581..db9d1383849 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -249,10 +249,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options conds := make([]PostPolicyV4Condition, len(opts.Conditions)) copy(conds, opts.Conditions) conds = append(conds, - conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), - conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + // These are ordered lexicographically. Technically the order doesn't matter + // for creating the policy, but we use this order to match the + // cross-language conformance tests for this feature. &singleValueCondition{"acl", descFields.ACL}, &singleValueCondition{"cache-control", descFields.CacheControl}, + &singleValueCondition{"content-disposition", descFields.ContentDisposition}, + &singleValueCondition{"content-encoding", descFields.ContentEncoding}, + &singleValueCondition{"content-type", descFields.ContentType}, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), ) YYYYMMDD := now.Format(yearMonthDay) @@ -261,8 +267,12 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "x-goog-date": now.Format(iso8601), "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", "x-goog-algorithm": "GOOG4-RSA-SHA256", - "success_action_redirect": descFields.RedirectToURLOnSuccess, "acl": descFields.ACL, + "cache-control": descFields.CacheControl, + "content-disposition": descFields.ContentDisposition, + "content-encoding": descFields.ContentEncoding, + "content-type": descFields.ContentType, + "success_action_redirect": descFields.RedirectToURLOnSuccess, } for key, value := range descFields.Metadata { conds = append(conds, &singleValueCondition{key, value}) diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 20d9518a42d..1fdb5ecb9c5 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -97,7 +97,11 @@ type Client struct { } // NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +// The default scope is ScopeFullControl. To use a different scope, like +// ScopeReadOnly, use option.WithScopes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. 
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { var host, readHost, scheme string @@ -349,7 +353,7 @@ var ( ) // v2SanitizeHeaders applies the specifications for canonical extension headers at -// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers func v2SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -397,7 +401,7 @@ func v2SanitizeHeaders(hdrs []string) []string { } // v4SanitizeHeaders applies the specifications for canonical extension headers -// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. // // V4 does a couple things differently from V2: // - Headers get sorted by key, instead of by key:value. We do this in @@ -583,8 +587,10 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st for k, v := range opts.QueryParameters { canonicalQueryString[k] = append(canonicalQueryString[k], v...) } - - fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + // url.Values.Encode escaping is correct, except that a space must be replaced + // by `%20` rather than `+`. + escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) + fmt.Fprintf(buf, "%s\n", escapedQuery) // Fill in the hostname based on the desired URL style. u.Host = opts.Style.host(bucket) @@ -868,6 +874,10 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) forceSendFields = append(forceSendFields, "TemporaryHold") } + if !uattrs.CustomTime.IsZero() { + attrs.CustomTime = uattrs.CustomTime + forceSendFields = append(forceSendFields, "CustomTime") + } if uattrs.Metadata != nil { attrs.Metadata = uattrs.Metadata if len(attrs.Metadata) == 0 { @@ -940,6 +950,7 @@ type ObjectAttrsToUpdate struct { ContentEncoding optional.String ContentDisposition optional.String CacheControl optional.String + CustomTime time.Time Metadata map[string]string // set to map[string]string{} to delete ACL []ACLRule @@ -1047,6 +1058,10 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { if !o.RetentionExpirationTime.IsZero() { ret = o.RetentionExpirationTime.Format(time.RFC3339) } + var ct string + if !o.CustomTime.IsZero() { + ct = o.CustomTime.Format(time.RFC3339) + } return &raw.Object{ Bucket: bucket, Name: o.Name, @@ -1061,6 +1076,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { StorageClass: o.StorageClass, Acl: toRawObjectACL(o.ACL), Metadata: o.Metadata, + CustomTime: ct, } } @@ -1199,6 +1215,14 @@ type ObjectAttrs struct { // Etag is the HTTP/1.1 Entity tag for the object. // This field is read-only. Etag string + + // A user-specified timestamp which can be applied to an object. This is + // typically set in order to use the CustomTimeBefore and DaysSinceCustomTime + // LifecycleConditions to manage object lifecycles. + // + // CustomTime cannot be removed once set on an object. It can be updated to a + // later value but not to an earlier one. + CustomTime time.Time } // convertTime converts a time in RFC3339 format to time.Time. 
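(Editorial aside, not part of the vendored storage.go diff: the hunks above thread the new `CustomTime` attribute through upload, update, and lifecycle handling, so a minimal usage sketch may help. The client, bucket, object, and project names below are invented; the sketch leans only on `BucketAttrs.Lifecycle`, the `DeleteAction` constant, and the `Writer`'s embedded `ObjectAttrs`, all of which are visible in this vendored version.)

```go
import (
    "context"
    "time"

    "cloud.google.com/go/storage"
)

// writeWithCustomTime uploads an object stamped with a CustomTime.
// Per the docs above, CustomTime may later move forward but never backward.
func writeWithCustomTime(ctx context.Context, client *storage.Client) error {
    w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
    w.CustomTime = time.Now() // Writer embeds ObjectAttrs.
    if _, err := w.Write([]byte("payload")); err != nil {
        return err
    }
    return w.Close()
}

// createBucketWithCustomTimeRule creates a bucket whose lifecycle deletes
// objects 30 days after their CustomTime, via the new DaysSinceCustomTime
// condition. "my-project" is an invented project ID.
func createBucketWithCustomTimeRule(ctx context.Context, client *storage.Client) error {
    attrs := &storage.BucketAttrs{
        Lifecycle: storage.Lifecycle{Rules: []storage.LifecycleRule{{
            Action:    storage.LifecycleAction{Type: storage.DeleteAction},
            Condition: storage.LifecycleCondition{DaysSinceCustomTime: 30},
        }}},
    }
    return client.Bucket("my-bucket").Create(ctx, "my-project", attrs)
}
```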
@@ -1252,6 +1276,7 @@ func newObject(o *raw.Object) *ObjectAttrs { Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), Etag: o.Etag, + CustomTime: convertTime(o.CustomTime), } } @@ -1297,6 +1322,17 @@ type Query struct { // the query. It's used internally and is populated for the user by // calling Query.SetAttrSelection fieldSelection string + + // StartOffset is used to filter results to objects whose names are + // lexicographically equal to or after startOffset. If endOffset is also set, + // the objects listed will have names between startOffset (inclusive) and + // endOffset (exclusive). + StartOffset string + + // EndOffset is used to filter results to objects whose names are + // lexicographically before endOffset. If startOffset is also set, the objects + // listed will have names between startOffset (inclusive) and endOffset (exclusive). + EndOffset string } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1329,6 +1365,7 @@ var attrToFieldMap = map[string]string{ "Deleted": "timeDeleted", "Updated": "updated", "Etag": "etag", + "CustomTime": "customTime", } // SetAttrSelection makes the query populate only specific attributes of @@ -1351,7 +1388,7 @@ func (q *Query) SetAttrSelection(attrs []string) error { if len(fieldSet) > 0 { var b bytes.Buffer - b.WriteString("items(") + b.WriteString("prefixes,items(") first := true for field := range fieldSet { if !first { diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md new file mode 100644 index 00000000000..03867d561af --- /dev/null +++ b/vendor/cloud.google.com/go/testing.md @@ -0,0 +1,236 @@ +# Testing Code that depends on Go Client Libraries + +The Go client libraries generated as a part of `cloud.google.com/go` all take +the approach of returning concrete types instead of interfaces. That way, new +fields and methods can be added to the libraries without breaking users. This +document will go over some patterns that can be used to test code that depends +on the Go client libraries. + +## Testing gRPC services using fakes + +*Note*: You can see the full +[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/fake). + +The clients found in `cloud.google.com/go` are gRPC based, with a couple of +notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage) +and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients. +Interactions with gRPC services can be faked by serving up your own in-memory +server within your test. One benefit of using this approach is that you don’t +need to define an interface in your runtime code; you can keep using +concrete struct types. You instead define a fake server in your test code. 
For example, take a look at the following function:
+
+```go
+import (
+    "context"
+    "fmt"
+    "log"
+    "os"
+
+    translate "cloud.google.com/go/translate/apiv3"
+    translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
+    ctx := context.Background()
+    log.Printf("Translating %q to %q", text, targetLang)
+    req := &translatepb.TranslateTextRequest{
+        Parent:             fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
+        TargetLanguageCode: "en-US",
+        Contents:           []string{text},
+    }
+    resp, err := client.TranslateText(ctx, req)
+    if err != nil {
+        return "", fmt.Errorf("unable to translate text: %v", err)
+    }
+    translations := resp.GetTranslations()
+    if len(translations) != 1 {
+        return "", fmt.Errorf("expected only one result, got %d", len(translations))
+    }
+    return translations[0].TranslatedText, nil
+}
+```
+
+Here is an example of what a fake server implementation would look like for
+faking the interactions above:
+
+```go
+import (
+    "context"
+
+    translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type fakeTranslationServer struct {
+    translatepb.UnimplementedTranslationServiceServer
+}
+
+func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
+    resp := &translatepb.TranslateTextResponse{
+        Translations: []*translatepb.Translation{
+            &translatepb.Translation{
+                TranslatedText: "Hello World",
+            },
+        },
+    }
+    return resp, nil
+}
+```
+
+All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
+contains a similar `package.UnimplementedFooServer` type that is useful for
+creating fakes. By embedding the unimplemented server in the
+`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
+exposes. Then, by providing your own `fakeTranslationServer.TranslateText`
+method, you can “override” the default unimplemented behavior of the one RPC
+that you would like to be faked.
+
+The test itself does require a little bit of setup: start up a `net.Listener`,
+register the server, and tell the client library to call the server:
+
+```go
+import (
+    "context"
+    "net"
+    "testing"
+
+    translate "cloud.google.com/go/translate/apiv3"
+    "google.golang.org/api/option"
+    translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+    "google.golang.org/grpc"
+)
+
+func TestTranslateTextWithConcreteClient(t *testing.T) {
+    ctx := context.Background()
+
+    // Setup the fake server.
+    fakeTranslationServer := &fakeTranslationServer{}
+    l, err := net.Listen("tcp", "localhost:0")
+    if err != nil {
+        t.Fatal(err)
+    }
+    gsrv := grpc.NewServer()
+    translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
+    fakeServerAddr := l.Addr().String()
+    go func() {
+        if err := gsrv.Serve(l); err != nil {
+            panic(err)
+        }
+    }()
+
+    // Create a client.
+    client, err := translate.NewTranslationClient(ctx,
+        option.WithEndpoint(fakeServerAddr),
+        option.WithoutAuthentication(),
+        option.WithGRPCDialOption(grpc.WithInsecure()),
+    )
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Run the test.
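+    // (Editorial comment, not in the upstream example: the client built
+    // above dials the in-memory fake, so the call below exercises the real
+    // request/serialization path with no external network and no credentials.)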
+    text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
+    if err != nil {
+        t.Fatal(err)
+    }
+    if text != "Hello World" {
+        t.Fatalf("got %q, want Hello World", text)
+    }
+}
+```
+
+## Testing using mocks
+
+*Note*: You can see the full
+[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/mock).
+
+When mocking code you need to work with interfaces. Let’s create an interface
+for the `cloud.google.com/go/translate/apiv3` client used in the
+`TranslateTextWithConcreteClient` function mentioned in the previous section.
+The `translate.Client` has over a dozen methods, but this code only uses one of
+them. Here is an interface that satisfies the interactions of the
+`translate.Client` in this function.
+
+```go
+type TranslationClient interface {
+    TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
+}
+```
+
+Now that we have an interface that satisfies the method being used, we can
+rewrite the function signature to take the interface instead of the concrete
+type.
+
+```go
+func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
+    // ...
+}
+```
+
+This allows a real `translate.Client` to be passed to the method in production
+and for a mock implementation to be passed in during testing. This pattern can
+be applied to any Go code, not just `cloud.google.com/go`. This is because
+interfaces in Go are implicitly satisfied. Structs in the client libraries can
+implicitly implement interfaces defined in your codebase. Let’s take a look at
+what it might look like to define a lightweight mock for the `TranslationClient`
+interface.
+
+```go
+import (
+    "context"
+    "testing"
+
+    "github.com/googleapis/gax-go/v2"
+    translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type mockClient struct{}
+
+func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
+    resp := &translatepb.TranslateTextResponse{
+        Translations: []*translatepb.Translation{
+            &translatepb.Translation{
+                TranslatedText: "Hello World",
+            },
+        },
+    }
+    return resp, nil
+}
+
+func TestTranslateTextWithAbstractClient(t *testing.T) {
+    client := &mockClient{}
+    text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
+    if err != nil {
+        t.Fatal(err)
+    }
+    if text != "Hello World" {
+        t.Fatalf("got %q, want Hello World", text)
+    }
+}
+```
+
+If you prefer not to write your own mocks, there are mocking frameworks such as
+[golang/mock](https://github.com/golang/mock) which can generate mocks for you
+from an interface. As a word of caution though, try not to
+[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
+
+## Testing using emulators
+
+Some of the client libraries provided in `cloud.google.com/go` support running
+against a service emulator. The concept is similar to that of using fakes,
+mentioned above, but the server is managed for you. You just need to start it up
+and instruct the client library to talk to the emulator by setting a
+service-specific emulator environment variable, as sketched below.
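(Editorial sketch, not part of the vendored testing.md: it assumes a storage fake is already listening on the hypothetical address `localhost:9000`. The vendored `storage.NewClient` inspects `STORAGE_EMULATOR_HOST`, so the variable only has to be set before the client is constructed; whether credentials are still required can vary by library version, so treat this as illustrative.)

```go
import (
    "context"
    "os"

    "cloud.google.com/go/storage"
)

func newEmulatorClient(ctx context.Context) (*storage.Client, error) {
    // Assumption: a fake GCS server is already running at this address.
    os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")

    // With the variable set, requests are routed to the emulator
    // instead of the production endpoint.
    return storage.NewClient(ctx)
}
```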
+The currently supported services and environment variables are:
+
+- bigtable: `BIGTABLE_EMULATOR_HOST`
+- datastore: `DATASTORE_EMULATOR_HOST`
+- firestore: `FIRESTORE_EMULATOR_HOST`
+- pubsub: `PUBSUB_EMULATOR_HOST`
+- spanner: `SPANNER_EMULATOR_HOST`
+- storage: `STORAGE_EMULATOR_HOST`
+  - Although the storage client supports an emulator environment variable, there is no official emulator provided by gcloud.
+
+For more information on emulators, please refer to the
+[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).
diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore
new file mode 100644
index 00000000000..3350aaf7064
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/.gitignore
@@ -0,0 +1,32 @@
+# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+.idea/
+.vscode/
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# go-autorest specific
+vendor/
+autorest/azure/example/example
diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
new file mode 100644
index 00000000000..d1f596bfc9b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
@@ -0,0 +1,1004 @@
+# CHANGELOG
+
+## v14.2.0
+
+- Added package comment to make `github.com/Azure/go-autorest` importable.
+
+## v14.1.1
+
+### Bug Fixes
+
+- Change `x-ms-authorization-auxiliary` header value separator to comma.
+
+## v14.1.0
+
+### New Features
+
+- Added `azure.SetEnvironment()` that will update the global environments map with the specified values.
+
+## v14.0.1
+
+### Bug Fixes
+
+- Fix race condition when refreshing token.
+- Fixed some tests to work with Go 1.14.
+
+## v14.0.0
+
+### Breaking Changes
+
+- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`.
+
+### New Features
+
+- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero, which means there is no cap.
+
+## v13.4.0
+
+### New Features
+
+- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client.
+- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators.
+
+## v13.3.3
+
+### Bug Fixes
+
+- Fixed connection leak when retrying requests.
+- Enabled exponential back-off with a 2-minute cap when retrying on 429.
+- Fixed some cases where errors were inadvertently dropped.
+
+## v13.3.2
+
+### Bug Fixes
+
+- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation.
+
+## v13.3.1
+
+- Updated external dependencies.
+
+## v13.3.0
+
+### New Features
+
+- Added support for shared key and shared access signature token authorization.
+  - `autorest.NewSharedKeyAuthorizer()` and dependent types.
+  - `autorest.NewSASTokenAuthorizer()` and dependent types.
+- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired.
+
+### Bug Fixes
+
+- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set.
+- Support parsing error messages in XML responses.
+
+## v13.2.0
+
+### New Features
+
+- Added the following functions to replace their versions that don't take a context.
+  - `adal.InitiateDeviceAuthWithContext()`
+  - `adal.CheckForUserCompletionWithContext()`
+  - `adal.WaitForUserCompletionWithContext()`
+
+## v13.1.0
+
+### New Features
+
+- Added support for MSI authentication on Azure App Service and Azure Functions.
+
+## v13.0.2
+
+### Bug Fixes
+
+- Always retry a request even if the sender returns a non-nil error.
+
+## v13.0.1
+
+### Bug Fixes
+
+- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters.
+
+## v13.0.0
+
+### Breaking Changes
+
+The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice.
+What this means is that by default no tracing provider will be compiled into your program, and setting the `AZURE_SDK_TRACING_ENABLED`
+environment variable will have no effect. To enable the previous behavior, you must now add the following import to your source file.
+```go
+  import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+The APIs required by autorest-generated code have remained, but some APIs have been removed and new ones added.
+The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package):
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added:
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer, simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+  - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+  - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+  - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking Kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant authorization via the x-ms-authorization-auxiliary header has been added for the client-credentials-with-secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+  The authentication helpers have been updated to support this scenario; if the environment variable AZURE_AUXILIARY_TENANT_IDS is set with a semicolon-delimited list of tenants, the multi-tenant codepath will kick in to create the appropriate authorizer.
+  See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken`, and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+  along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries.
+
+## v12.2.0
+
+### New Features
+
+- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators.
+- Added `autorest.ByUnmarshallingBytes` response decorator.
+- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting the HTTP status code in `autorest.Response` types.
+
+### Bug Fixes
+
+- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes.
+
+## v12.1.0
+
+### New Features
+
+- Added `to.ByteSlicePtr()`.
+- Added blob/queue storage resource ID to `azure.ResourceIdentifier`.
+
+## v12.0.0
+
+### Breaking Changes
+
+In preparation for modules, the following deprecated content has been removed:
+
+  - async.NewFuture()
+  - async.Future.Done()
+  - async.Future.WaitForCompletion()
+  - async.DoPollForAsynchronous()
+  - The `utils` package
+  - validation.NewErrorWithValidationError()
+  - The `version` package
+
+## v11.9.0
+
+### New Features
+
+- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds.
+
+## v11.8.0
+
+### New Features
+
+- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation.
+
+## v11.7.1
+
+### Bug Fixes
+
+- Fix missing support for http(s) proxy when using the default sender.
+
+## v11.7.0
+
+### New Features
+
+- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package.
+
+## v11.6.1
+
+### Bug Fixes
+
+- Fix ACR DNS endpoint for government clouds.
+- Add Cosmos DB DNS endpoints.
+- Update dependencies to resolve build breaks in OpenCensus.
+
+## v11.6.0
+
+### New Features
+
+- Added type `autorest.BasicAuthorizer` to support Basic authentication.
+
+## v11.5.2
+
+### Bug Fixes
+
+- Fixed `GetTokenFromCLI` so that it works with zsh.
+
+## v11.5.1
+
+### Bug Fixes
+
+- In `Client.sender()`, set the minimum TLS version on HTTP clients to 1.2.
+
+## v11.5.0
+
+### New Features
+
+- The `auth` package has been refactored so that the environment and file settings are now available.
+- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
+- Added support for certificate authorization for file-based config.
+
+## v11.4.0
+
+### New Features
+
+- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests.
+- Exported `adal.UserAgent()` for parity with `autorest.Client`.
+
+## v11.3.2
+
+### Bug Fixes
+
+- In `Future.WaitForCompletionRef()`, if the provided context has a deadline, don't add the default deadline.
+
+## v11.3.1
+
+### Bug Fixes
+
+- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases.
+
+## v11.3.0
+
+### New Features
+
+- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type.
+
+## v11.2.8
+
+### Bug Fixes
+
+- Deprecated content in the `version` package.
The functionality has been superseded by content in the `autorest` package.
+
+## v11.2.7
+
+### Bug Fixes
+
+- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
+  Note that for backward compatibility reasons, both will work until the next major version release of the package.
+
+## v11.2.6
+
+### Bug Fixes
+
+- If zero bytes are read from a polling response body, don't attempt to unmarshal them.
+
+## v11.2.5
+
+### Bug Fixes
+
+- Removed race condition in `autorest.DoRetryForStatusCodes`.
+
+## v11.2.4
+
+### Bug Fixes
+
+- Function `cli.ProfilePath` now respects the environment variable `AZURE_CONFIG_DIR` if available.
+
+## v11.2.1
+
+NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
+longer work with golint.
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+  Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+  will start instrumenting the code for metrics and traces.
+  Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+  calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+  App Insights Local Forwarder that needs to be running. Note that if the
+  AI Local Forwarder is not running, tracing will still be enabled.
+  By default, instrumentation is disabled. Once enabled, instrumentation can also
+  be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+  it is already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure CLI 2.0.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD, the following fields have had their types changed from `string` to `json.Number`:
+  - ExpiresIn
+  - ExpiresOn
+  - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled, return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code, return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
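+
+The v10.15.2 fix can be reproduced with the standard library alone. A minimal sketch (the
+request line below is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	line := "GET /a%2Fb HTTP/1.1" // contains a percent sign (%2F is an escaped '/')
+
+	// Fprintf interprets %2F as a malformed formatting verb and
+	// prints "GET /a%!F(MISSING)b HTTP/1.1".
+	fmt.Fprintf(os.Stdout, line)
+	fmt.Fprintln(os.Stdout)
+
+	// Fprint writes the string verbatim: "GET /a%2Fb HTTP/1.1".
+	fmt.Fprint(os.Stdout, line)
+}
+```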
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response, return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error, include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via environment variables.
+  Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+  without their bodies. To include the bodies, set the log level to `LogDebug`.
+  By default the logger writes to stderr; however, it can also write to stdout or a file
+  if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+  already exists, it will be truncated.
+  IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+  headers. Any other secrets will _not_ be redacted.
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers, return the response body from Future.GetResult().
+- If there is no "final GET URL", return an error from Future.GetResult().
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response, don't retry.
+  - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant, stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+  - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Added user information to the authorization config as parsed from the CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret.
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshal the inner Token.
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+| Old Method                 | New Method                    |
+| -------------------------: | :---------------------------: |
+| azure.NewFuture()          | azure.NewFutureFromResponse() |
+| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results; this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil-guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug on device authentication.
+
+## v10.6.1
+
+- Added retries to MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+
+## v10.5.1
+
+### Bug Fixes
+
+- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test` with the `-v` flag.
+
+## v10.5.0
+
+### New Features
+
+- Added NewPollingRequestWithContext() for use with polling asynchronous operations.
+
+### Bug Fixes
+
+- Make retry logic use the request's context instead of the deprecated Cancel object.
+
+## v10.4.0
+
+### New Features
+
+- Added helper for parsing Azure resource IDs.
+- Added deprecation message to utils.GetEnvVarOrExit().
+
+## v10.3.0
+
+### New Features
+
+- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid cloud model, where one may define their own endpoints.
+- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where the TokenAudience endpoint can differ from the ResourceManagerEndpoint.
+
+## v10.2.0
+
+### New Features
+
+- Added endpoints for batch management.
+
+## v10.1.3
+
+### Bug Fixes
+
+- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization().
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers.
+
+## v10.1.2
+
+- Corrected comment for auth.NewAuthorizerFromFile() function.
+
+## v10.1.1
+
+- Updated version number to match current release.
+
+## v10.1.0
+
+### New Features
+
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated).
+
+## v10.0.0
+
+### New Features
+
+- Added target and innererror fields to ServiceError to comply with OData v4 spec.
+- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors).
+- Added helper methods for obtaining authorizers.
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Switched from glide to dep for dependency management.
+- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec.
+- Fixed a race condition in token refresh.
+
+### Breaking Changes
+
+- The ServiceError.Details field type has been changed to match the OData v4 spec.
+- Go v1.7 has been dropped from CI.
+- API parameter validation failures will now return a unique error type validation.Error.
+- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
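+
+The Future-based polling flow added over the v10.x releases above composes roughly as follows.
+This is a sketch, not the package's documented example; `resp` is assumed to be the initial
+response of a long-running operation:
+
+```go
+package lro
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+)
+
+// pollToCompletion builds a Future from the initial LRO response, blocks
+// until the operation finishes, then issues the final GET for the result.
+func pollToCompletion(ctx context.Context, client autorest.Client, resp *http.Response) (*http.Response, error) {
+	future, err := azure.NewFutureFromResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+	if err := future.WaitForCompletionRef(ctx, client); err != nil {
+		return nil, err
+	}
+	return future.GetResult(client)
+}
+```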
+
+## v9.10.0
+
+- Fixed the Service Bus suffix in the Azure public environment.
+- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control).
+
+## v9.9.0
+
+### New Features
+
+- Added EventGridKeyAuthorizer for key authorization with event grid topics.
+
+### Bug Fixes
+
+- Fixed race condition when auto-refreshing service principal tokens.
+
+## v9.8.1
+
+### Bug Fixes
+
+- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations.
+- Updated runtime version info so it's current.
+
+## v9.8.0
+
+### New Features
+
+- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed.
+
+## v9.7.1
+
+### Bug Fixes
+
+- Use correct AAD and Graph endpoints for US Gov environment.
+
+## v9.7.0
+
+### New Features
+
+- Added support for application/octet-stream MIME types.
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user-assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL query parameters when calling autorest.WithQueryParameters.
+- Set correct Content-Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code, and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures, look at the error type, not the status code, as it could vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Updated AccessTokensPath() to read the access tokens path from AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it falls back to the default path set by the Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixed a bug regarding the cookie jar on `autorest.Client.Sender`.
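+
+As a sketch of the preparer touched by the v9.5.3 fix above (the URL and filter value are
+hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	// WithQueryParameters percent-encodes its values; encoding already present
+	// in the base URL is left intact per the v9.5.3 fix.
+	req, err := autorest.Prepare(&http.Request{},
+		autorest.WithBaseURL("https://example.com/"),
+		autorest.WithPath("items"),
+		autorest.WithQueryParameters(map[string]interface{}{
+			"filter": "name eq 'a b'",
+		}))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(req.URL.String()) // query portion is percent-encoded
+}
+```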
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hasn't been registered previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Added missing Apache headers.
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Added MSI endpoint support and CLI token rehydration.
+
+## v8.3.1
+
+Picked up a bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version testing has been removed from the production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire or a trailing portion
+  of the response body. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- Fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending file in request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503, and 504 status codes.
+- Change url path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
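+
+The `ByDiscardingBody` responder from v7.3.0 above is typically chained ahead of `ByClosing`.
+A minimal sketch (the request target is illustrative):
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	resp, err := http.Get("https://example.com/")
+	if err != nil {
+		panic(err)
+	}
+	// Draining and then closing the body lets Go's HTTP transport reuse the
+	// underlying connection for subsequent requests.
+	if err := autorest.Respond(resp,
+		autorest.ByDiscardingBody(),
+		autorest.ByClosing()); err != nil {
+		panic(err)
+	}
+}
+```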
+
+## v7.0.4
+
+- Better error messages for long-running operation failures
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead, new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancellation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package.
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
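+
+The channel-accepting `DelayForBackoff` from v4.0.0 above can be used directly; a sketch with
+illustrative attempt counts and durations:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	cancel := make(chan struct{}) // close to abort the delay early; may be nil
+
+	for attempt := 0; attempt < 3; attempt++ {
+		// Sleeps roughly 1s, 2s, then 4s; returns false if cancel fires first.
+		if !autorest.DelayForBackoff(time.Second, attempt, cancel) {
+			fmt.Println("canceled")
+			return
+		}
+		fmt.Println("attempt", attempt, "delay elapsed")
+	}
+}
+```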
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys
+
+## v1.1.1
+
+- Introduced godeps and vendored dependencies
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
+
+## v1.0.1
+
+- Added CHANGELOG.md
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs. add
+- Added JSON to error when unmarshalling fails
+- Added Client#Send method
+- Corrected case of "Azure" in package paths
+- Added "to" helpers, Azure helpers, and improved ease-of-use
+- Corrected golint issues
diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 00000000000..a434e73ac49
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+	go install $(DIR)
+
+test:
+	go test $(DIR) || exit 1
+
+vet:
+	@echo "go vet ."
+	@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "Vet found suspicious constructs. Please check the reported constructs"; \
+		echo "and fix them if necessary before submitting the code for review."; \
+		exit 1; \
+	fi
+
+fmt:
+	gofmt -w $(DIR)
+
+.PHONY: build test vet fmt
diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock
new file mode 100644
index 00000000000..dc6e3e633e6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock
@@ -0,0 +1,324 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + 
"plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", 
+ "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 00000000000..1fc28659696 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md
new file mode 100644
index 00000000000..de1e19a44df
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/README.md
@@ -0,0 +1,165 @@
+# go-autorest
+
+[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest)
+[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
+[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
+
+Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
+
+An authentication client tested with Azure Active Directory (AAD) is also
+provided in this repo in the package
+`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
+is maintained only as part of the Azure Go SDK and is not related to other
+"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
+
+## Overview
+
+Package go-autorest implements an HTTP request pipeline suitable for use across
+multiple goroutines and provides the shared routines used by packages generated
+by [Autorest](https://github.com/Azure/autorest.go).
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+```go
+  req, err := Prepare(&http.Request{},
+    token.WithAuthorization())
+
+  resp, err := Send(req,
+    WithLogging(logger),
+    DoErrorIfStatusCode(http.StatusInternalServerError),
+    DoCloseIfError(),
+    DoRetryForAttempts(5, time.Second))
+
+  err = Respond(resp,
+    ByDiscardingBody(),
+    ByClosing())
+```
+
+Each phase relies on decorators to modify and/or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+```go
+  req, err := Prepare(&http.Request{},
+    WithBaseURL("https://microsoft.com/"),
+    WithPath("a"),
+    WithPath("b"),
+    WithPath("c"))
+```
+
+will set the URL to:
+
+```
+  https://microsoft.com/a/b/c
+```
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple goroutines, and a single Sender shared among multiple sending goroutines,
+all bound together by means of input/output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., `ByUnmarshallingJSON`) is likely incorrect.
+
+Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
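+
+A self-contained, runnable illustration of the three phases end to end follows; the endpoint,
+retry count, and expected status are assumptions for the sketch, not part of the package's
+documented examples:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	// Prepare: build the request declaratively.
+	req, err := autorest.Prepare(&http.Request{},
+		autorest.WithBaseURL("https://example.com/"))
+	if err != nil {
+		panic(err)
+	}
+
+	// Send: execute it, retrying up to three times on failure.
+	resp, err := autorest.Send(req,
+		autorest.DoRetryForAttempts(3, time.Second))
+	if err != nil {
+		panic(err)
+	}
+
+	// Respond: convert unexpected status codes into an autorest.Error,
+	// then close the body so the connection can be reused.
+	err = autorest.Respond(resp,
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByDiscardingBody(),
+		autorest.ByClosing())
+	fmt.Println("status:", resp.Status, "error:", err)
+}
+```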
+
+## Helpers
+
+### Handling Swagger Dates
+
+The Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
+parsing and formatting.
+
+### Handling Empty Values
+
+In JSON, missing values have different semantics than empty values. This is especially true for
+services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
+only those values to modify. Missing values are to be left unchanged. Developers, then, require a
+means to both specify an empty value and to leave the value out of the submitted JSON.
+
+The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
+empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
+for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
+treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
+the Go base types encoded through the default JSON package, it is not possible to create JSON to
+clear a value at the server.
+
+The workaround within the Go community is to use pointers to base types in lieu of base types within
+structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
+`*string`. While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. This, for
+example,
+
+```go
+  s := struct {
+    S *string
+  }{ S: &"foo" }
+```
+fails, while this
+
+```go
+  v := "foo"
+  s := struct {
+    S *string
+  }{ S: &v }
+```
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types which have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+  s := struct {
+    S *string
+  }{ S: to.StringPtr("foo") }
+```
+
+A runnable version of this example appears at the end of this README.
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
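+
+The "Handling Empty Values" section above can be made concrete with `encoding/json` and the
+`to` helpers; the `widget` type is hypothetical:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+type widget struct {
+	Name *string `json:"name,omitempty"`
+}
+
+func main() {
+	// Set a value: a pointer to "foo" is rendered.
+	b, _ := json.Marshal(widget{Name: to.StringPtr("foo")})
+	fmt.Println(string(b)) // {"name":"foo"}
+
+	// Clear at the server: a non-nil pointer to "" is still rendered,
+	// which a plain string field with omitempty could not express.
+	b, _ = json.Marshal(widget{Name: to.StringPtr("")})
+	fmt.Println(string(b)) // {"name":""}
+
+	// Omit entirely: a nil pointer is dropped by omitempty.
+	b, _ = json.Marshal(widget{})
+	fmt.Println(string(b)) // {}
+}
+```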
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 00000000000..fec416a9c41
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,293 @@
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+	// This is called after the token is acquired
+	return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	*oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	*oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	*oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	*oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+	*oauthConfig,
+	applicationID,
+	username,
+	password,
+	resource,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Authorization code authentication
+
+``` Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+	*oauthConfig,
+	applicationID,
+	clientSecret,
+	authorizationCode,
+	redirectURI,
+	resource,
+	callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+	application id
+  -certificatePath string
+	path to pkcs12/PFX application certificate
+  -mode string
+	authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+	resource for which the token is requested
+  -secret string
+	application secret
+  -tenantId string
+	tenant id
+  -tokenCachePath string
+	location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 00000000000..fa5964742fc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+const (
+	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+	AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+	AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+	TokenEndpoint      url.URL `json:"tokenEndpoint"`
+	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+	return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+	if len(param) == 0 {
+		return fmt.Errorf("parameter '" + name + "' cannot be empty")
+	}
+	return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+	apiVer := "1.0"
+	return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
+// If apiVersion is not nil, the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+		return nil, err
+	}
+	api := ""
+	// it's legal for tenantID to be empty so don't validate it
+	if apiVersion != nil {
+		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+			return nil, err
+		}
+		api = fmt.Sprintf("?api-version=%s", *apiVersion)
+	}
+	u, err := url.Parse(activeDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorityURL, err := u.Parse(tenantID)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+	if err != nil {
+		return nil, err
+	}
+	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+	if err != nil {
+		return nil, err
+	}
+	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+	if err != nil {
+		return nil, err
+	}
+
+	return &OAuthConfig{
+		AuthorityEndpoint:  *authorityURL,
+		AuthorizeEndpoint:  *authorizeURL,
+		TokenEndpoint:      *tokenURL,
+		DeviceCodeEndpoint: *deviceCodeURL,
+	}, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+	PrimaryTenant() *OAuthConfig
+	AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+	APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+	if c.APIVersion != "" {
+		return fmt.Sprintf("?api-version=%s", c.APIVersion)
+	}
+	return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 00000000000..9daa4b58b88 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,273 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*
+ This file is largely based on rjw57/oauth2device's code, with the following differences:
+  * scope -> resource, and only allow a single one
+  * receive "Message" in the DeviceCode struct and show it to users as the prompt
+  * azure-xplat-cli has the following behavior that this emulates:
+    - does not send client_secret during the token exchange
+    - sends resource again in the token exchange request
+*/
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+	errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint
+// It contains information to instruct the user to complete the auth flow
+type DeviceCode struct {
+	DeviceCode      *string `json:"device_code,omitempty"`
+	UserCode        *string `json:"user_code,omitempty"`
+	VerificationURL *string `json:"verification_url,omitempty"`
+	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+	Interval        *int64  `json:"interval,string,omitempty"`
+
+	Message     *string `json:"message"` // Azure specific
+	Resource    string  // stored when initiating the flow, used again when exchanging the code for a token
+	OAuthConfig OAuthConfig
+	ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+	Error            *string `json:"error,omitempty"`
+	ErrorCodes       []int   `json:"error_codes,omitempty"`
+	ErrorDescription *string `json:"error_description,omitempty"`
+	Timestamp        *string `json:"timestamp,omitempty"`
+	TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint
endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +// Deprecated: use InitiateDeviceAuthWithContext() instead. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) +} + +// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +// Deprecated: use CheckForUserCompletionWithContext() instead. 
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+	return CheckForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has been completed, timed out, or otherwise failed
+func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
+	v := url.Values{
+		"client_id":  []string{code.ClientID},
+		"code":       []string{*code.DeviceCode},
+		"grant_type": []string{OAuthGrantTypeDeviceCode},
+		"resource":   []string{code.Resource},
+	}
+
+	s := v.Encode()
+	body := ioutil.NopCloser(strings.NewReader(s))
+
+	req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+	}
+
+	req.ContentLength = int64(len(s))
+	req.Header.Set(contentType, mimeTypeFormPost)
+	resp, err := sender.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+	}
+	defer resp.Body.Close()
+
+	rb, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+	}
+
+	if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
+	}
+	if len(strings.Trim(string(rb), " ")) == 0 {
+		return nil, ErrOAuthTokenEmpty
+	}
+
+	var token deviceToken
+	err = json.Unmarshal(rb, &token)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+	}
+
+	if token.Error == nil {
+		return &token.Token, nil
+	}
+
+	switch *token.Error {
+	case "authorization_pending":
+		return nil, ErrDeviceAuthorizationPending
+	case "slow_down":
+		return nil, ErrDeviceSlowDown
+	case "access_denied":
+		return nil, ErrDeviceAccessDenied
+	case "code_expired":
+		return nil, ErrDeviceCodeExpired
+	default:
+		// return a more meaningful error message if available
+		if token.ErrorDescription != nil {
+			return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
+		}
+		return nil, ErrDeviceGeneric
+	}
+}
+
+// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
+// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+// Deprecated: use WaitForUserCompletionWithContext() instead.
+func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+	return WaitForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
+// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 00000000000..8c5d36ca61d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,13 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/logger v0.2.1 + github.com/Azure/go-autorest/tracing v0.6.0 + github.com/form3tech-oss/jwt-go v3.2.2+incompatible + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 00000000000..5ee68e70010 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,21 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 00000000000..7551b79235d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 00000000000..2a974a39b3c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,135 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. + ErrMissingPrivateKey = errors.New("adal: private key missing") +) + +// LoadToken restores a Token object from a file located at 'path'. 
+func LoadToken(path string) (*Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var token Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&token); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+	}
+	return &token, nil
+}
+
+// SaveToken persists an oauth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	newFile, err := ioutil.TempFile(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key or an error is returned.
+// If the private key is not password protected, pass the empty string for password.
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	blocks, err := pkcs12.ToPEM(pfxData, password)
+	if err != nil {
+		return nil, nil, err
+	}
+	// first extract the private key
+	var priv *rsa.PrivateKey
+	for _, block := range blocks {
+		if block.Type == "PRIVATE KEY" {
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			break
+		}
+	}
+	if priv == nil {
+		return nil, nil, ErrMissingPrivateKey
+	}
+	// now find the certificate with the matching public key of our private key
+	var cert *x509.Certificate
+	for _, block := range blocks {
+		if block.Type == "CERTIFICATE" {
+			pcert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+			if !ok {
+				// keep looking
+				continue
+			}
+			if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+				// found a match
+				cert = pcert
+				break
+			}
+		}
+	}
+	if cert == nil {
+		return nil, nil, ErrMissingCertificate
+	}
+	return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 00000000000..1826a68dc82
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,96 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/cookiejar"
+	"sync"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// DO NOT ACCESS THIS DIRECTLY. go through sender()
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender.
+// Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+func sender() Sender {
+	// note that we can't init defaultSender in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenderInit.Do(func() {
+		// Use behaviour compatible with DefaultTransport, but require a minimum TLS version.
+		defaultTransport := http.DefaultTransport.(*http.Transport)
+		transport := &http.Transport{
+			Proxy:                 defaultTransport.Proxy,
+			DialContext:           defaultTransport.DialContext,
+			MaxIdleConns:          defaultTransport.MaxIdleConns,
+			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+			TLSClientConfig: &tls.Config{
+				MinVersion: tls.VersionTLS12,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 00000000000..c870ef4ec03
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1336 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/logger"
+	"github.com/form3tech-oss/jwt-go"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+
+	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+	OAuthGrantTypeDeviceCode = "device_code"
+
+	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+	OAuthGrantTypeClientCredentials = "client_credentials"
+
+	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+	OAuthGrantTypeUserPass = "password"
+
+	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+	OAuthGrantTypeRefreshToken = "refresh_token"
+
+	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+	OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+	// metadataHeader is the header required by MSI extension
+	metadataHeader = "Metadata"
+
+	// msiEndpoint is the well-known endpoint for getting MSI authentication tokens
+	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+	// the API version to use for the MSI endpoint
+	msiAPIVersion = "2018-02-01"
+
+	// the default number of attempts to refresh an MSI authentication token
+	defaultMaxMSIRefreshAttempts = 5
+
+	// msiEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+	msiEndpointEnv = "MSI_ENDPOINT"
+
+	// msiSecretEnv is the environment variable used to store the request secret on App Service and Functions
+	msiSecretEnv = "MSI_SECRET"
+
+	// the API version to use for the legacy App Service MSI endpoint
+	appServiceAPIVersion2017 = "2017-09-01"
+
+	// secretHeader is the header used when authenticating against the App Service MSI endpoint
+	secretHeader = "Secret"
+
+	// the format for expires_on in UTC with AM/PM
+	expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
+
+	// the format for expires_on in UTC without AM/PM
+	expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+	OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality that accepts a context
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// TokenRefresh is a type representing a custom callback to refresh a token
+type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn json.Number `json:"expires_in"`
+	ExpiresOn json.Number `json:"expires_on"`
+	NotBefore json.Number `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+	return Token{
+		ExpiresIn: "0",
+		ExpiresOn: "0",
+		NotBefore: "0",
+	}
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := t.ExpiresOn.Float64()
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(s)
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret
+// meaning it is not valid for fetching a fresh token. This is used by tokens
+// created manually, e.g. via NewServicePrincipalTokenFromManualToken.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
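The expiry helpers above treat `ExpiresOn` as Unix seconds held in a `json.Number`, and `WillExpireIn` compares against now plus the given window. A small sketch of that arithmetic using only the exported API:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// a token that expires two minutes from now
	exp := time.Now().Add(2 * time.Minute).Unix()
	tok := adal.Token{ExpiresOn: json.Number(strconv.FormatInt(exp, 10))}

	fmt.Println(tok.IsExpired())                   // false: still valid
	fmt.Println(tok.WillExpireIn(5 * time.Minute)) // true: inside a 5m refresh window
}
```

+// MarshalJSON implements the json.Marshaler interface.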
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { + msiType msiType + clientResourceID string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) {
+	return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported")
+}
+
+// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
+type ServicePrincipalUsernamePasswordSecret struct {
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	v.Set("username", secret.Username)
+	v.Set("password", secret.Password)
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) {
+	type tokenType struct {
+		Type     string `json:"type"`
+		Username string `json:"username"`
+		Password string `json:"password"`
+	}
+	return json.Marshal(tokenType{
+		Type:     "ServicePrincipalUsernamePasswordSecret",
+		Username: secret.Username,
+		Password: secret.Password,
+	})
+}
+
+// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
+type ServicePrincipalAuthorizationCodeSecret struct {
+	ClientSecret      string `json:"value"`
+	AuthorizationCode string `json:"authCode"`
+	RedirectURI       string `json:"redirect"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	v.Set("code", secret.AuthorizationCode)
+	v.Set("client_secret", secret.ClientSecret)
+	v.Set("redirect_uri", secret.RedirectURI)
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) {
+	type tokenType struct {
+		Type     string `json:"type"`
+		Value    string `json:"value"`
+		AuthCode string `json:"authCode"`
+		Redirect string `json:"redirect"`
+	}
+	return json.Marshal(tokenType{
+		Type:     "ServicePrincipalAuthorizationCodeSecret",
+		Value:    secret.ClientSecret,
+		AuthCode: secret.AuthorizationCode,
+		Redirect: secret.RedirectURI,
+	})
+}
+
+// ServicePrincipalToken encapsulates a Token created for a Service Principal.
+type ServicePrincipalToken struct {
+	inner             servicePrincipalToken
+	refreshLock       *sync.RWMutex
+	sender            Sender
+	customRefreshFunc TokenRefresh
+	refreshCallbacks  []TokenRefreshCallback
+	// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+	// Setting this to a value less than 1 will use the default value.
+	MaxMSIRefreshAttempts int
+}
+
+// MarshalTokenJSON returns the marshalled inner token.
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
+	return json.Marshal(spt.inner.Token)
+}
+
+// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
+func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
+	spt.refreshCallbacks = callbacks
+}
+
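Before the setter below, a hedged sketch of what a custom `TokenRefresh` callback can look like; the token source inside is purely hypothetical:

```go
package example

import (
	"context"

	"github.com/Azure/go-autorest/autorest/adal"
)

// installCustomRefresh wires a hand-rolled TokenRefresh into an existing
// ServicePrincipalToken; any source that yields an adal.Token would do.
func installCustomRefresh(spt *adal.ServicePrincipalToken) {
	spt.SetCustomRefreshFunc(func(ctx context.Context, resource string) (*adal.Token, error) {
		// fetch a token for `resource` from your own token service here (hypothetical)
		return &adal.Token{AccessToken: "token-from-custom-source", ExpiresOn: "3600"}, nil
	})
}
```

+// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.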
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
+	spt.customRefreshFunc = customRefreshFunc
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
+	return json.Marshal(spt.inner)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+	// need to determine the token type
+	raw := map[string]interface{}{}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+	secret := raw["secret"].(map[string]interface{})
+	switch secret["type"] {
+	case "ServicePrincipalNoSecret":
+		spt.inner.Secret = &ServicePrincipalNoSecret{}
+	case "ServicePrincipalTokenSecret":
+		spt.inner.Secret = &ServicePrincipalTokenSecret{}
+	case "ServicePrincipalCertificateSecret":
+		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+	case "ServicePrincipalMSISecret":
+		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+	case "ServicePrincipalUsernamePasswordSecret":
+		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+	case "ServicePrincipalAuthorizationCodeSecret":
+		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+	default:
+		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+	}
+	err = json.Unmarshal(data, &spt.inner)
+	if err != nil {
+		return err
+	}
+	// Don't override the refreshLock or the sender if those have already been set.
+	if spt.refreshLock == nil {
+		spt.refreshLock = &sync.RWMutex{}
+	}
+	if spt.sender == nil {
+		spt.sender = sender()
+	}
+	return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+	Token         Token                  `json:"token"`
+	Secret        ServicePrincipalSecret `json:"secret"`
+	OauthConfig   OAuthConfig            `json:"oauth"`
+	ClientID      string                 `json:"clientID"`
+	Resource      string                 `json:"resource"`
+	AutoRefresh   bool                   `json:"autoRefresh"`
+	RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
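The type switch above keys off the secret's `"type"` discriminator. A round-trip sketch, assuming `NewOAuthConfig` from this package's config file (error handling elided for brevity):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	cfg, _ := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	spt, _ := adal.NewServicePrincipalToken(*cfg, "client-id", "client-secret", "https://management.azure.com/")

	b, _ := json.Marshal(spt) // MarshalJSON emits the inner token, secret (with its type), and oauth config

	var restored adal.ServicePrincipalToken
	_ = json.Unmarshal(b, &restored) // UnmarshalJSON rebuilds a ServicePrincipalTokenSecret from "type"
	fmt.Println(restored.Token().IsZero())
}
```

+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.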
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalTokenSecret{
+			ClientSecret: secret,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if certificate == nil {
+		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+	}
+	if privateKey == nil {
+		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalCertificateSecret{
+			PrivateKey:  privateKey,
+			Certificate: certificate,
+		},
+		callbacks...,
+	)
+}
+
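A hedged usage sketch for the certificate constructor above; `cert` and `key` stand in for a certificate and private key loaded elsewhere (this sketch does no PFX parsing itself):

```go
package example

import (
	"crypto/rsa"
	"crypto/x509"

	"github.com/Azure/go-autorest/autorest/adal"
)

// newCertSPT shows the call shape only; cert and key are assumed to be
// loaded by the caller, and the client ID and resource are placeholders.
func newCertSPT(cfg adal.OAuthConfig, cert *x509.Certificate, key *rsa.PrivateKey) (*adal.ServicePrincipalToken, error) {
	return adal.NewServicePrincipalTokenFromCertificate(
		cfg, "client-id", cert, key, "https://management.azure.com/")
}
```

+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.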
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied
+// authorization code, client credentials, and redirect URI.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+type msiType int
+
+const (
+	msiTypeUnavailable msiType = iota
+	msiTypeAppServiceV20170901
+	msiTypeCloudShell
+	msiTypeIMDS
+)
+
+func (m msiType) String() string {
+	switch m {
+	case msiTypeUnavailable:
+		return "unavailable"
+	case msiTypeAppServiceV20170901:
+		return "AppServiceV20170901"
+	case msiTypeCloudShell:
+		return "CloudShell"
+	case msiTypeIMDS:
+		return "IMDS"
+	default:
+		return fmt.Sprintf("unhandled MSI type %d", m)
+	}
+}
+
+// returns the MSI type and endpoint, or an error
+func getMSIType() (msiType, string, error) {
+	if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
+		// if the env var MSI_ENDPOINT is set
+		if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
+			// if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
+			return msiTypeAppServiceV20170901, endpointEnvVar, nil
+		}
+		// if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
+		return msiTypeCloudShell, endpointEnvVar, nil
+	} else if msiAvailableHook(context.Background(), sender()) {
+		// if MSI_ENDPOINT is NOT set AND the IMDS endpoint is available the msiType is IMDS. This will time out after 500 milliseconds
+		return msiTypeIMDS, msiEndpoint, nil
+	} else {
+		// if MSI_ENDPOINT is NOT set and the IMDS endpoint is not available, Managed Identity is not available
+		return msiTypeUnavailable, "", errors.New("MSI not available")
+	}
+}
+
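The detection rules in `getMSIType` reduce to a small decision table. This standalone sketch (names invented) mirrors them, taking the environment values as inputs instead of reading `os.Getenv` or probing IMDS:

```go
package example

// detect mirrors getMSIType's decision table for illustration only.
func detect(msiEndpoint, msiSecret string, imdsReachable bool) string {
	switch {
	case msiEndpoint != "" && msiSecret != "":
		return "AppServiceV20170901" // both vars set => App Service / Functions
	case msiEndpoint != "":
		return "CloudShell" // endpoint only => Cloud Shell
	case imdsReachable:
		return "IMDS" // no vars, but the IMDS probe succeeded
	default:
		return "unavailable"
	}
}
```

+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.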
+// NOTE: this always returns the IMDS endpoint; it does not work for App Service or Cloud Shell.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions.
+// It will return an error when not running in an app service/functions environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIAppServiceEndpoint() (string, error) {
+	msiType, endpoint, err := getMSIType()
+	if err != nil {
+		return "", err
+	}
+	switch msiType {
+	case msiTypeAppServiceV20170901:
+		return endpoint, nil
+	default:
+		return "", fmt.Errorf("%s is not app service environment", msiType)
+	}
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIEndpoint() (string, error) {
+	_, endpoint, err := getMSIType()
+	return endpoint, err
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the clientID of the specified user-assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil {
+		return nil, err
+	}
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the Azure resource ID of the user-assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil {
+		return nil, err
+	}
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...)
+}
+
+// ManagedIdentityOptions contains optional values for configuring managed identity authentication.
+type ManagedIdentityOptions struct {
+	// ClientID is the user-assigned identity to use during authentication.
+	// It is mutually exclusive with IdentityResourceID.
+	ClientID string
+
+	// IdentityResourceID is the resource ID of the user-assigned identity to use during authentication.
+	// It is mutually exclusive with ClientID.
+	IdentityResourceID string
+}
+
+// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity.
+// It supports the following managed identity environments:
+// - App Service Environment (API version 2017-09-01 only)
+// - Cloud Shell
+// - IMDS with a system or user assigned identity
+func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if options == nil {
+		options = &ManagedIdentityOptions{}
+	}
+	return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if userAssignedID != "" && identityResourceID != "" {
+		return nil, errors.New("cannot specify userAssignedID and identityResourceID")
+	}
+	msiType, endpoint, err := getMSIType()
+	if err != nil {
+		logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v", err)
+		return nil, err
+	}
+	logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s", msiType, endpoint)
+	if msiEndpoint != "" {
+		endpoint = msiEndpoint
+		logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s", endpoint)
+	}
+	msiEndpointURL, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	// cloud shell sends its data in the request body
+	if msiType != msiTypeCloudShell {
+		v := url.Values{}
+		v.Set("resource", resource)
+		clientIDParam := "client_id"
+		switch msiType {
+		case msiTypeAppServiceV20170901:
+			clientIDParam = "clientid"
+			v.Set("api-version", appServiceAPIVersion2017)
+			break
+		case msiTypeIMDS:
+			v.Set("api-version", msiAPIVersion)
+		}
+		if userAssignedID != "" {
+			v.Set(clientIDParam, userAssignedID)
+		} else if identityResourceID != "" {
+			v.Set("mi_res_id", identityResourceID)
+		}
+		msiEndpointURL.RawQuery = v.Encode()
+	}
+
+	spt := &ServicePrincipalToken{
+		inner: servicePrincipalToken{
+			Token: newToken(),
+			OauthConfig: OAuthConfig{
+				TokenEndpoint: *msiEndpointURL,
+			},
+			Secret: &ServicePrincipalMSISecret{
+				msiType:          msiType,
+				clientResourceID: identityResourceID,
+			},
+			Resource:      resource,
+			AutoRefresh:   true,
+			RefreshWithin: defaultRefresh,
+			ClientID:      userAssignedID,
+		},
+		refreshLock:           &sync.RWMutex{},
+		sender:                sender(),
+		refreshCallbacks:      callbacks,
+		MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
+	}
+
+	return spt, nil
+}
+
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+	message string
+	resp    *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+	return tre.message
+}
+
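Putting the constructor above to use is a one-liner; a hedged sketch for a user-assigned identity (for the system-assigned identity, pass nil options instead):

```go
package example

import "github.com/Azure/go-autorest/autorest/adal"

// newManagedIdentitySPT returns a token scoped to ARM using a user-assigned
// identity; the resource URL and client ID are placeholders.
func newManagedIdentitySPT() (*adal.ServicePrincipalToken, error) {
	return adal.NewServicePrincipalTokenFromManagedIdentity(
		"https://management.azure.com/",
		&adal.ManagedIdentityOptions{ClientID: "user-assigned-client-id"},
	)
}
```

+// Response implements the TokenRefreshError interface; it returns the raw HTTP response from the refresh operation.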
+func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. 
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+	switch spt.inner.Secret.(type) {
+	case *ServicePrincipalUsernamePasswordSecret:
+		return OAuthGrantTypeUserPass
+	case *ServicePrincipalAuthorizationCodeSecret:
+		return OAuthGrantTypeAuthorizationCode
+	default:
+		return OAuthGrantTypeClientCredentials
+	}
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+	if spt.customRefreshFunc != nil {
+		token, err := spt.customRefreshFunc(ctx, resource)
+		if err != nil {
+			return err
+		}
+		spt.inner.Token = *token
+		return spt.InvokeRefreshCallbacks(spt.inner.Token)
+	}
+	req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+	if err != nil {
+		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+	}
+	req.Header.Add("User-Agent", UserAgent())
+	req = req.WithContext(ctx)
+	var resp *http.Response
+	authBodyFilter := func(b []byte) []byte {
+		if logger.Level() != logger.LogAuth {
+			return []byte("**REDACTED** authentication body")
+		}
+		return b
+	}
+	if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+		switch msiSecret.msiType {
+		case msiTypeAppServiceV20170901:
+			req.Method = http.MethodGet
+			req.Header.Set("secret", os.Getenv(msiSecretEnv))
+			break
+		case msiTypeCloudShell:
+			req.Header.Set("Metadata", "true")
+			data := url.Values{}
+			data.Set("resource", spt.inner.Resource)
+			if spt.inner.ClientID != "" {
+				data.Set("client_id", spt.inner.ClientID)
+			} else if msiSecret.clientResourceID != "" {
+				data.Set("msi_res_id", msiSecret.clientResourceID)
+			}
+			req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
+			req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+			break
+		case msiTypeIMDS:
+			req.Method = http.MethodGet
+			req.Header.Set("Metadata", "true")
+			break
+		}
+		logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
+		resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+	} else {
+		v := url.Values{}
+		v.Set("client_id", spt.inner.ClientID)
+		v.Set("resource", resource)
+
+		if spt.inner.Token.RefreshToken != "" {
+			v.Set("grant_type", OAuthGrantTypeRefreshToken)
+			v.Set("refresh_token", spt.inner.Token.RefreshToken)
+			// web apps must specify client_secret when refreshing tokens
+			// see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
+			if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
+				err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			v.Set("grant_type", spt.getGrantType())
+			err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+			if err != nil {
+				return err
+			}
+		}
+
+		s := v.Encode()
+		body := ioutil.NopCloser(strings.NewReader(s))
+		req.ContentLength = int64(len(s))
+		req.Header.Set(contentType, mimeTypeFormPost)
+		req.Body = body
+		logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
+		resp, err = spt.sender.Do(req)
+	}
+
+	// don't return a TokenRefreshError here; this will allow retry logic to apply
+	if err != nil {
+		return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
+	} else if resp == nil {
+		return fmt.Errorf("adal: received nil response and error")
+	}
+
+	logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
+	defer resp.Body.Close()
+	rb, err := ioutil.ReadAll(resp.Body)
+
+	if resp.StatusCode != http.StatusOK {
+		if err != nil {
+			return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp)
+		}
+		return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp)
+	}
+
+	// for the following error cases don't return a TokenRefreshError. the operation succeeded
+	// but some transient failure happened during deserialization. by returning a generic error
+	// the retry logic will kick in (we don't retry on TokenRefreshError).
+
+	if err != nil {
+		return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
+	}
+	if len(strings.Trim(string(rb), " ")) == 0 {
+		return fmt.Errorf("adal: Empty service principal token received during refresh")
+	}
+	token := struct {
+		AccessToken  string `json:"access_token"`
+		RefreshToken string `json:"refresh_token"`
+
+		// AAD returns expires_in as a string, ADFS returns it as an int
+		ExpiresIn json.Number `json:"expires_in"`
+		// expires_on can be in two formats, a UTC time stamp or the number of seconds.
+		ExpiresOn string      `json:"expires_on"`
+		NotBefore json.Number `json:"not_before"`
+
+		Resource string `json:"resource"`
+		Type     string `json:"token_type"`
+	}{}
+	// return a TokenRefreshError in the following error cases as the token is in an unexpected format
+	err = json.Unmarshal(rb, &token)
+	if err != nil {
+		return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp)
+	}
+	expiresOn := json.Number("")
+	// ADFS doesn't include the expires_on field
+	if token.ExpiresOn != "" {
+		if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil {
+			return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp)
+		}
+	}
+	spt.inner.Token.AccessToken = token.AccessToken
+	spt.inner.Token.RefreshToken = token.RefreshToken
+	spt.inner.Token.ExpiresIn = token.ExpiresIn
+	spt.inner.Token.ExpiresOn = expiresOn
+	spt.inner.Token.NotBefore = token.NotBefore
+	spt.inner.Token.Resource = token.Resource
+	spt.inner.Token.Type = token.Type
+
+	return spt.InvokeRefreshCallbacks(spt.inner.Token)
+}
+
+// converts expires_on to the number of seconds
+func parseExpiresOn(s string) (json.Number, error) {
+	// convert the expiration date to the number of seconds from now
+	timeToDuration := func(t time.Time) json.Number {
+		dur := t.Sub(time.Now().UTC())
+		return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10))
+	}
+	if _, err := strconv.ParseInt(s, 10, 64); err == nil {
+		// this is the number of seconds case, no conversion required
+		return json.Number(s), nil
+	} else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil {
+		return timeToDuration(eo), nil
+	} else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil {
+		return timeToDuration(eo), nil
+	} else {
+		// unknown format
+		return json.Number(""), err
+	}
+}
+
+// retry logic specific to retrieving a token from the IMDS endpoint
+func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) {
+	// copied from client.go due to circular dependency
+	retries := []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+	// extra retry status codes specific to IMDS
+	retries = append(retries,
+		http.StatusNotFound,
+		http.StatusGone,
+		// all remaining 5xx
+		http.StatusNotImplemented,
+		http.StatusHTTPVersionNotSupported,
+		http.StatusVariantAlsoNegotiates,
+		http.StatusInsufficientStorage,
+		http.StatusLoopDetected,
+		http.StatusNotExtended,
+		http.StatusNetworkAuthenticationRequired)
+
+	// see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance
+
+	const maxDelay time.Duration = 60 * time.Second
+
+	attempt := 0
+	delay := time.Duration(0)
+
+	// maxAttempts is user-specified; ensure that its value is greater than zero, else no request will be made
+	if maxAttempts < 1 {
+		maxAttempts = defaultMaxMSIRefreshAttempts
+	}
+
+	for attempt < maxAttempts {
+		if resp != nil && resp.Body != nil {
+			io.Copy(ioutil.Discard, resp.Body)
+			resp.Body.Close()
+		}
+		resp, err = sender.Do(req)
+		// we want to retry if err is not nil or the status code is in the list of retry codes
+		if err == nil && !responseHasStatusCode(resp, retries...) {
+			return
+		}
+
+		// perform exponential backoff with a cap.
+		// must increment attempt before calculating delay.
+		attempt++
+		// the base value of 2 is the "delta backoff" as specified in the guidance doc
+		delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second)
+		if delay > maxDelay {
+			delay = maxDelay
+		}
+
+		select {
+		case <-time.After(delay):
+			// intentionally left blank
+		case <-req.Context().Done():
+			err = req.Context().Err()
+			return
+		}
+	}
+	return
+}
+
+func responseHasStatusCode(resp *http.Response, codes ...int) bool {
+	if resp != nil {
+		for _, i := range codes {
+			if i == resp.StatusCode {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+	spt.inner.AutoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the window before token expiration during which EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+	spt.inner.RefreshWithin = d
+	return
+}
+
+// SetSender sets the http.Client used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token
+}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+	PrimaryToken    *ServicePrincipalToken
+	AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+	return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+	tokens := make([]string, len(mt.AuxiliaryTokens))
+	for i := range mt.AuxiliaryTokens {
+		tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+	}
+	return tokens
+}
+
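The cumulative delay in the loop above grows by 2^attempt seconds per pass and is capped at 60s. A tiny standalone sketch of the same arithmetic:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const maxDelay = 60 * time.Second
	delay := time.Duration(0)
	for attempt := 1; attempt <= 5; attempt++ {
		delay += time.Duration(math.Pow(2, float64(attempt))) * time.Second
		if delay > maxDelay {
			delay = maxDelay
		}
		fmt.Printf("attempt %d sleeps %v\n", attempt, delay) // 2s, 6s, 14s, 30s, 60s
	}
}
```

+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.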
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. +func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalTokenWithSecret( + *multiTenantCfg.PrimaryTenant(), + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalTokenWithSecret( + *auxTenants[i], + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. 
+func MSIAvailable(ctx context.Context, sender Sender) bool { + resp, err := getMSIEndpoint(ctx, sender) + if err == nil { + resp.Body.Close() + } + return err == nil +} + +// used for testing purposes +var msiAvailableHook = func(ctx context.Context, sender Sender) bool { + return MSIAvailable(ctx, sender) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 00000000000..953f7550282 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,75 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "fmt" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 00000000000..729bfbd0abf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,74 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 00000000000..c867b348439 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 00000000000..1226c411150 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,353 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. 
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer with the specified subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key header.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements bearer authorization.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
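A hedged wiring sketch: a BearerAuthorizer built from a service principal token and attached to an autorest.Client (the Client type and NewClientWithUserAgent come from this same autorest package; spt is assumed to be created with one of the adal constructors):

```go
package example

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

// newAuthorizedClient attaches a BearerAuthorizer to an autorest.Client so
// every prepared request carries an "Authorization: Bearer ..." header.
func newAuthorizedClient(spt *adal.ServicePrincipalToken) autorest.Client {
	c := autorest.NewClientWithUserAgent("example-agent")
	c.Authorizer = autorest.NewBearerAuthorizer(spt)
	return c
}
```

+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.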
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
+			rCopy := *r
+			removeRequestBody(&rCopy)
+
+			resp, err := bacb.sender.Do(&rCopy)
+			if err != nil {
+				return r, err
+			}
+			DrainResponseBody(resp)
+			if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
+				bc, err := newBearerChallenge(resp.Header)
+				if err != nil {
+					return r, err
+				}
+				if bacb.callback != nil {
+					ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+					if err != nil {
+						return r, err
+					}
+					return Prepare(r, ba.WithAuthorization())
+				}
+			}
+		}
+		return r, err
+	})
+	}
+}
+
+// returns true if the HTTP response contains a bearer challenge
+func hasBearerChallenge(header http.Header) bool {
+	authHeader := header.Get(bearerChallengeHeader)
+	if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
+		return false
+	}
+	return true
+}
+
+type bearerChallenge struct {
+	values map[string]string
+}
+
+func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
+	challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
+	trimmedChallenge := challenge[len(bearer)+1:]
+
+	// challenge is a set of key=value pairs that are comma delimited
+	pairs := strings.Split(trimmedChallenge, ",")
+	if len(pairs) < 1 {
+		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+		return bc, err
+	}
+
+	bc.values = make(map[string]string)
+	for i := range pairs {
+		trimmedPair := strings.TrimSpace(pairs[i])
+		pair := strings.Split(trimmedPair, "=")
+		if len(pair) == 2 {
+			// remove the enclosing quotes
+			key := strings.Trim(pair[0], "\"")
+			value := strings.Trim(pair[1], "\"")
+
+			switch key {
+			case "authorization", "authorization_uri":
+				// strip the tenant ID from the authorization URL
+				asURL, err := url.Parse(value)
+				if err != nil {
+					return bc, err
+				}
+				bc.values[tenantID] = asURL.Path[1:]
+			default:
+				bc.values[key] = value
+			}
+		}
+	}
+
+	return bc, err
+}
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+	topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+	return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := map[string]interface{}{
+		"aeg-sas-key": egta.topicKey,
+	}
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic <base64>" where <base64> is the base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+	userName string
+	password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+	return &BasicAuthorizer{
+		userName: userName,
+		password: password,
+	}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
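A quick sanity check of the header this produces, as a self-contained sketch (the credentials are obviously fake):

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    )

    func main() {
    	ba := autorest.NewBasicAuthorizer("user", "pass")
    	req, err := autorest.Prepare(&http.Request{}, ba.WithAuthorization())
    	if err != nil {
    		panic(err)
    	}
    	// base64("user:pass") == "dXNlcjpwYXNz"
    	fmt.Println(req.Header.Get("Authorization")) // prints: Basic dXNlcjpwYXNz
    }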
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantServicePrincipalTokenAuthorizer using the given token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+	return NewMultiTenantBearerAuthorizer(tp)
+}
+
+// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants.
+type MultiTenantBearerAuthorizer struct {
+	tp adal.MultitenantOAuthTokenProvider
+}
+
+// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider.
+func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer {
+	return &MultiTenantBearerAuthorizer{tp: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+				err = refresher.EnsureFreshWithContext(r.Context())
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh one or more Tokens for request to %s", r.URL)
+				}
+			}
+			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+			if err != nil {
+				return r, err
+			}
+			auxTokens := mt.tp.AuxiliaryOAuthTokens()
+			for i := range auxTokens {
+				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+			}
+			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", ")))
+		})
+	}
+}
+
+// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer.
+func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider {
+	return mt.tp
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
new file mode 100644
index 00000000000..66501493bd6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
@@ -0,0 +1,66 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// SASTokenAuthorizer implements authorization for SAS Token Authentication;
+// it can be used for interaction with Blob Storage endpoints.
+type SASTokenAuthorizer struct {
+	sasToken string
+}
+
+// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given SAS token.
+func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
+	if strings.TrimSpace(sasToken) == "" {
+		return nil, fmt.Errorf("sasToken cannot be empty")
+	}
+
+	token := sasToken
+	if strings.HasPrefix(sasToken, "?") {
+		token = strings.TrimPrefix(sasToken, "?")
+	}
+
+	return &SASTokenAuthorizer{
+		sasToken: token,
+	}, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
+// URI's query parameters. This can be used for the Blob, Queue, and File Services.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
+func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+
+			if r.URL.RawQuery == "" {
+				r.URL.RawQuery = sas.sasToken
+			} else if !strings.Contains(r.URL.RawQuery, sas.sasToken) {
+				r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
+			}
+
+			return Prepare(r)
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
new file mode 100644
index 00000000000..2af5030a1cd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
@@ -0,0 +1,307 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+)
+
+// SharedKeyType defines the enumeration for the various shared key types.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
+type SharedKeyType string
+
+const (
+	// SharedKey is used to authorize against blobs, files and queues services.
+	SharedKey SharedKeyType = "sharedKey"
+
+	// SharedKeyForTable is used to authorize against the table service.
+	SharedKeyForTable SharedKeyType = "sharedKeyTable"
+
+	// SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
+	// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
+	SharedKeyLite SharedKeyType = "sharedKeyLite"
+
+	// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
+	// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
+	SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
+)
+
+const (
+	headerAccept            = "Accept"
+	headerAcceptCharset     = "Accept-Charset"
+	headerContentEncoding   = "Content-Encoding"
+	headerContentLength     = "Content-Length"
+	headerContentMD5        = "Content-MD5"
+	headerContentLanguage   = "Content-Language"
+	headerIfModifiedSince   = "If-Modified-Since"
+	headerIfMatch           = "If-Match"
+	headerIfNoneMatch       = "If-None-Match"
+	headerIfUnmodifiedSince = "If-Unmodified-Since"
+	headerDate              = "Date"
+	headerXMSDate           = "X-Ms-Date"
+	headerXMSVersion        = "x-ms-version"
+	headerRange             = "Range"
+)
+
+const storageEmulatorAccountName = "devstoreaccount1"
+
+// SharedKeyAuthorizer implements authorization for Shared Key;
+// it can be used for interaction with Blob, File and Queue Storage endpoints.
+type SharedKeyAuthorizer struct {
+	accountName string
+	accountKey  []byte
+	keyType     SharedKeyType
+}
+
+// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return nil, fmt.Errorf("malformed storage account key: %v", err)
+	}
+	return &SharedKeyAuthorizer{
+		accountName: accountName,
+		accountKey:  key,
+		keyType:     keyType,
+	}, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "<scheme> <AccountName>:<Signature>", where <scheme> is "SharedKey" or "SharedKeyLite".
+// This can be used for the Blob, Queue, and File Services.
+//
+// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
+// You may use Shared Key authorization to authorize a request made against the
+// 2009-09-19 version and later of the Blob and Queue services,
+// and version 2014-02-14 and later of the File services.
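To make the canonicalization code below easier to review, here is a hedged sketch of how the authorizer is meant to be used; the account name, base64 key, and container URL are placeholders:

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    )

    func main() {
    	// The key must be valid base64 ("ZmFrZSBrZXk=" is just "fake key" encoded).
    	auth, err := autorest.NewSharedKeyAuthorizer("myaccount", "ZmFrZSBrZXk=", autorest.SharedKey)
    	if err != nil {
    		panic(err)
    	}
    	req, err := autorest.Prepare(&http.Request{},
    		autorest.AsGet(),
    		autorest.WithBaseURL("https://myaccount.blob.core.windows.net/mycontainer"),
    		auth.WithAuthorization())
    	if err != nil {
    		panic(err)
    	}
    	// The header has the form "SharedKey myaccount:<signature>".
    	fmt.Println(req.Header.Get("Authorization"))
    }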
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + if err != nil { + return r, err + } + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = 
strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 00000000000..aafdf021fd6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
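As a concrete illustration of the two helpers above, a minimal sketch (the URL and delay values are made up):

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"

    	"github.com/Azure/go-autorest/autorest"
    )

    func main() {
    	resp := &http.Response{StatusCode: http.StatusAccepted, Header: http.Header{}}
    	resp.Header.Set(autorest.HeaderLocation, "https://example.com/operations/123")
    	resp.Header.Set(autorest.HeaderRetryAfter, "5")

    	fmt.Println(autorest.GetLocation(resp))                   // https://example.com/operations/123
    	fmt.Println(autorest.GetRetryAfter(resp, 30*time.Second)) // 5s; falls back to 30s if absent or malformed
    }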
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000000..42e28cf2e4d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,991 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// FutureAPI contains the set of methods on the Future type. +type FutureAPI interface { + // Response returns the last HTTP response. + Response() *http.Response + + // Status returns the last status message of the operation. + Status() string + + // PollingMethod returns the method used to monitor the status of the asynchronous operation. + PollingMethod() PollingMethodType + + // DoneWithContext queries the service to see if the operation has completed. + DoneWithContext(context.Context, autorest.Sender) (bool, error) + + // GetPollingDelay returns a duration the application should wait before checking + // the status of the asynchronous request and true; this value is returned from + // the service via the Retry-After response header. 
+	// If the header wasn't returned
+	// then the function returns the zero-value time.Duration and false.
+	GetPollingDelay() (time.Duration, bool)
+
+	// WaitForCompletionRef will return when one of the following conditions is met: the long
+	// running operation has completed, the provided context is cancelled, or the client's
+	// polling duration has been exceeded. It will retry failed polling attempts based on
+	// the retry value defined in the client up to the maximum retry attempts.
+	// If no deadline is specified in the context then the client.PollingDuration will be
+	// used to determine if a default deadline should be used.
+	// If PollingDuration is greater than zero the value will be used as the context's timeout.
+	// If PollingDuration is zero then no default deadline will be used.
+	WaitForCompletionRef(context.Context, autorest.Client) error
+
+	// MarshalJSON implements the json.Marshaler interface.
+	MarshalJSON() ([]byte, error)
+
+	// UnmarshalJSON implements the json.Unmarshaler interface.
+	UnmarshalJSON([]byte) error
+
+	// PollingURL returns the URL used for retrieving the status of the long-running operation.
+	PollingURL() string
+
+	// GetResult should be called once polling has completed successfully.
+	// It makes the final GET call to retrieve the resultant payload.
+	GetResult(autorest.Sender) (*http.Response, error)
+}
+
+var _ FutureAPI = (*Future)(nil)
+
+// Future provides a mechanism to access the status and results of an asynchronous request.
+// Since futures are stateful they should be passed by value to avoid race conditions.
+type Future struct {
+	pt pollingTracker
+}
+
+// NewFutureFromResponse returns a new Future object initialized
+// with the initial response from an asynchronous operation.
+func NewFutureFromResponse(resp *http.Response) (Future, error) {
+	pt, err := createPollingTracker(resp)
+	return Future{pt: pt}, err
+}
+
+// Response returns the last HTTP response.
+func (f Future) Response() *http.Response {
+	if f.pt == nil {
+		return nil
+	}
+	return f.pt.latestResponse()
+}
+
+// Status returns the last status message of the operation.
+func (f Future) Status() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingStatus()
+}
+
+// PollingMethod returns the method used to monitor the status of the asynchronous operation.
+func (f Future) PollingMethod() PollingMethodType {
+	if f.pt == nil {
+		return PollingUnknown
+	}
+	return f.pt.pollingMethod()
+}
+
+// DoneWithContext queries the service to see if the operation has completed.
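A hedged sketch of the manual polling loop this method enables; WaitForCompletionRef further down packages the same pattern with retries and deadlines (the package and function names here are illustrative only):

    package lro

    import (
    	"context"
    	"time"

    	"github.com/Azure/go-autorest/autorest"
    	"github.com/Azure/go-autorest/autorest/azure"
    )

    // pollUntilDone assumes client is a configured autorest.Client and
    // future was obtained from azure.NewFutureFromResponse.
    func pollUntilDone(ctx context.Context, client autorest.Client, future *azure.Future) error {
    	for {
    		done, err := future.DoneWithContext(ctx, client)
    		if err != nil {
    			return err
    		}
    		if done {
    			return nil
    		}
    		// Honor the service's Retry-After if present, else use the client's default delay.
    		delay, ok := future.GetPollingDelay()
    		if !ok {
    			delay = client.PollingDelay
    		}
    		select {
    		case <-ctx.Done():
    			return ctx.Err()
    		case <-time.After(delay):
    		}
    	}
    }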
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
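The common happy path combines this method with GetResult; a minimal sketch under assumed names (initial is whatever response started the long-running operation):

    package lro

    import (
    	"context"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    	"github.com/Azure/go-autorest/autorest/azure"
    )

    func waitForResult(ctx context.Context, client autorest.Client, initial *http.Response) (*http.Response, error) {
    	future, err := azure.NewFutureFromResponse(initial)
    	if err != nil {
    		return nil, err
    	}
    	if err := future.WaitForCompletionRef(ctx, client); err != nil {
    		return nil, err
    	}
    	// Fetch the final payload once the LRO reports success.
    	return future.GetResult(client)
    }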
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
+	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
+	defer func() {
+		sc := -1
+		resp := f.Response()
+		if resp != nil {
+			sc = resp.StatusCode
+		}
+		tracing.EndSpan(ctx, sc, err)
+	}()
+	cancelCtx := ctx
+	// if the provided context already has a deadline don't override it
+	_, hasDeadline := ctx.Deadline()
+	if d := client.PollingDuration; !hasDeadline && d != 0 {
+		var cancel context.CancelFunc
+		cancelCtx, cancel = context.WithTimeout(ctx, d)
+		defer cancel()
+	}
+	// if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll
+	if delay, ok := f.GetPollingDelay(); ok {
+		if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed {
+			err = cancelCtx.Err()
+			return
+		}
+	}
+	done, err := f.DoneWithContext(ctx, client)
+	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
+		if attempts >= client.RetryAttempts {
+			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
+		}
+		// we want delayAttempt to be zero in the non-error case so
+		// that DelayForBackoff doesn't perform exponential back-off
+		var delayAttempt int
+		var delay time.Duration
+		if err == nil {
+			// check for Retry-After delay, if not present use the client's polling delay
+			var ok bool
+			delay, ok = f.GetPollingDelay()
+			if !ok {
+				delay = client.PollingDelay
+			}
+		} else {
+			// there was an error polling for status so perform exponential
+			// back-off based on the number of attempts using the client's retry
+			// duration. update attempts after delayAttempt to avoid off-by-one.
+			delayAttempt = attempts
+			delay = client.RetryDuration
+			attempts++
+		}
+		// wait until the delay elapses or the context is cancelled
+		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
+		if !delayElapsed {
+			return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
+		}
+	}
+	return
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (f Future) MarshalJSON() ([]byte, error) {
+	return json.Marshal(f.pt)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (f *Future) UnmarshalJSON(data []byte) error {
+	// unmarshal into JSON object to determine the tracker type
+	obj := map[string]interface{}{}
+	err := json.Unmarshal(data, &obj)
+	if err != nil {
+		return err
+	}
+	if obj["method"] == nil {
+		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+	}
+	method := obj["method"].(string)
+	switch strings.ToUpper(method) {
+	case http.MethodDelete:
+		f.pt = &pollingTrackerDelete{}
+	case http.MethodPatch:
+		f.pt = &pollingTrackerPatch{}
+	case http.MethodPost:
+		f.pt = &pollingTrackerPost{}
+	case http.MethodPut:
+		f.pt = &pollingTrackerPut{}
+	default:
+		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
+	}
+	// now unmarshal into the tracker
+	return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs. in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + resp, err := sender.Do(req) + if err == nil && resp.Body != nil { + // copy the body and close it so callers don't have to + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, err + } + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + return resp, err +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the 
initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! + pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + goto Default + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + 
return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+			return err
+		} else if lh != "" {
+			if ao == "" {
+				pt.URI = lh
+				pt.Pm = PollingLocation
+			}
+		}
+		// make sure a polling URL was found
+		if pt.URI == "" {
+			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+		}
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) checkForErrors() error {
+	err := pt.baseCheckForErrors()
+	if err != nil {
+		return err
+	}
+	// if there are no LRO headers then the body cannot be empty
+	ao, err := getURLFromAsyncOpHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	lh, err := getURLFromLocationHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
+		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) provisioningStateApplicable() bool {
+	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// creates a polling tracker based on the verb of the original request
+func createPollingTracker(resp *http.Response) (pollingTracker, error) {
+	var pt pollingTracker
+	switch strings.ToUpper(resp.Request.Method) {
+	case http.MethodDelete:
+		pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPatch:
+		pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPost:
+		pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPut:
+		pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	default:
+		return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
+	}
+	if err := pt.initializeState(); err != nil {
+		return pt, err
+	}
+	// this initializes the polling header values; we do this during creation in case the
+	// initial response sends us invalid values. this way the API call will return a non-nil
+	// error (not doing this means the error shows up in Future.Done)
+	return pt, pt.updatePollingMethod()
+}
+
+// gets the polling URL from the Azure-AsyncOperation header.
+// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// gets the polling URL from the Location header.
+// ensures the URL is well-formed and absolute.
+func getURLFromLocationHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// verify that the URL is valid and absolute
+func isValidURL(s string) bool {
+	u, err := url.Parse(s)
+	return err == nil && u.IsAbs()
+}
+
+// PollingMethodType defines a type used for enumerating polling mechanisms.
+type PollingMethodType string
+
+const (
+	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
+	PollingAsyncOperation PollingMethodType = "AsyncOperation"
+
+	// PollingLocation indicates the polling method uses the Location header.
+	PollingLocation PollingMethodType = "Location"
+
+	// PollingRequestURI indicates the polling method uses the original request URI.
+	PollingRequestURI PollingMethodType = "RequestURI"
+
+	// PollingUnknown indicates an unknown polling method and is the default value.
+	PollingUnknown PollingMethodType = ""
+)
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+	// FutureType is the name of the type composed of an azure.Future.
+	FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+	return AsyncOpIncompleteError{
+		FutureType: futureType,
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 00000000000..0ded76bc6f1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,388 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderContentType is the type of the content in the HTTP response.
+	HeaderContentType = "Content-Type"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
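Since services deviate from the OData shape, the custom unmarshaler below normalizes several variants; as a hedged sketch of decoding a typical payload (the JSON body is illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/Azure/go-autorest/autorest/azure"
    )

    func main() {
    	payload := []byte(`{"code":"ResourceNotFound","message":"The resource was not found.","target":"frontend"}`)
    	var se azure.ServiceError
    	if err := json.Unmarshal(payload, &se); err != nil {
    		panic(err)
    	}
    	fmt.Println(se.Error()) // Code="ResourceNotFound" Message="The resource was not found." Target="frontend"
    }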
+type ServiceError struct {
+	Code           string                   `json:"code"`
+	Message        string                   `json:"message"`
+	Target         *string                  `json:"target"`
+	Details        []map[string]interface{} `json:"details"`
+	InnerError     map[string]interface{}   `json:"innererror"`
+	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+}
+
+func (se ServiceError) Error() string {
+	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Target != nil {
+		result += fmt.Sprintf(" Target=%q", *se.Target)
+	}
+
+	if se.Details != nil {
+		if d, err := json.Marshal(se.Details); err != nil {
+			result += fmt.Sprintf(" Details=%v", se.Details)
+		} else {
+			result += fmt.Sprintf(" Details=%v", string(d))
+		}
+	}
+
+	if se.InnerError != nil {
+		if d, err := json.Marshal(se.InnerError); err != nil {
+			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+		} else {
+			result += fmt.Sprintf(" InnerError=%v", string(d))
+		}
+	}
+
+	if se.AdditionalInfo != nil {
+		if d, err := json.Marshal(se.AdditionalInfo); err != nil {
+			result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
+		} else {
+			result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
+		}
+	}
+
+	return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+	type serviceErrorInternal struct {
+		Code           string                   `json:"code"`
+		Message        string                   `json:"message"`
+		Target         *string                  `json:"target,omitempty"`
+		AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
+		// not all services conform to the OData v4 spec.
+		// the following fields are where we've seen discrepancies
+
+		// spec calls for []map[string]interface{} but we have seen map[string]interface{}
+		Details interface{} `json:"details,omitempty"`
+
+		// spec calls for map[string]interface{} but we have seen []map[string]interface{} and string
+		InnerError interface{} `json:"innererror,omitempty"`
+	}
+
+	sei := serviceErrorInternal{}
+	if err := json.Unmarshal(b, &sei); err != nil {
+		return err
+	}
+
+	// copy the fields we know to be correct
+	se.AdditionalInfo = sei.AdditionalInfo
+	se.Code = sei.Code
+	se.Message = sei.Message
+	se.Target = sei.Target
+
+	// converts an []interface{} to []map[string]interface{}
+	arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
+		arrayOf, ok := v.([]interface{})
+		if !ok {
+			return nil, false
+		}
+		final := []map[string]interface{}{}
+		for _, item := range arrayOf {
+			as, ok := item.(map[string]interface{})
+			if !ok {
+				return nil, false
+			}
+			final = append(final, as)
+		}
+		return final, true
+	}
+
+	// convert the remaining fields, falling back to raw JSON if necessary
+
+	if c, ok := arrayOfObjs(sei.Details); ok {
+		se.Details = c
+	} else if c, ok := sei.Details.(map[string]interface{}); ok {
+		se.Details = []map[string]interface{}{c}
+	} else if sei.Details != nil {
+		// stuff into Details
+		se.Details = []map[string]interface{}{
+			{"raw": sei.Details},
+		}
+	}
+
+	if c, ok := sei.InnerError.(map[string]interface{}); ok {
+		se.InnerError = c
+	} else if c, ok := arrayOfObjs(sei.InnerError); ok {
+		// if there's only one error, extract it
+		if len(c) == 1 {
+			se.InnerError = c[0]
+		} else {
+			// multiple errors, stuff them into the value
+			se.InnerError = map[string]interface{}{
+				"multi": c,
+			}
+		}
+	} else if c, ok := sei.InnerError.(string); ok {
+		se.InnerError = map[string]interface{}{"error": c}
+	} else if sei.InnerError != nil {
+		// stuff into InnerError
+		se.InnerError = map[string]interface{}{
+			"raw": sei.InnerError,
+		}
+	}
+	return nil
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error" xml:"Error"`
+
+	// The request id (from the x-ms-request-id header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// String returns the string form of the Azure resource ID.
+func (r Resource) String() string {
+	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
+func ParseResourceID(resourceID string) (Resource, error) {
+
+	const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
+	resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
+	match := resourceIDPattern.FindStringSubmatch(resourceID)
+
+	if len(match) == 0 {
+		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
+	}
+
+	v := strings.Split(match[5], "/")
+	resourceName := v[len(v)-1]
+
+	result := Resource{
+		SubscriptionID: match[1],
+		ResourceGroup:  match[2],
+		Provider:       match[3],
+		ResourceType:   match[4],
+		ResourceName:   resourceName,
+	}
+
+	return result, nil
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+	if v, ok := original.(*RequestError); ok {
+		return *v
+	}
+
+	statusCode := autorest.UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	return RequestError{
+		DetailedError: autorest.DetailedError{
+			Original:    original,
+			PackageType: packageType,
+			Method:      method,
+			StatusCode:  statusCode,
+			Message:     fmt.Sprintf(message, args...),
+		},
+	}
+}
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that the UUID accompanies the http.Response.
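+//
+// Illustrative sketch (assumed usage; the UUID is a placeholder):
+//
+//	req, _ := autorest.Prepare(&http.Request{},
+//		WithReturningClientID("0F39878C-5F76-4DB8-A25D-61D2C193C3CA"))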
+func WithReturningClientID(uuid string) autorest.PrepareDecorator {
+	preparer := autorest.CreatePreparer(
+		WithClientID(uuid),
+		WithReturnClientID(true))
+
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			return preparer.Prepare(r)
+		})
+	}
+}
+
+// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
+func WithClientID(uuid string) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderClientID, uuid)
+}
+
+// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-return-client-request-id whose boolean value indicates if the value of the
+// x-ms-client-request-id header should be included in the http.Response.
+func WithReturnClientID(b bool) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
+}
+
+// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
+// http.Request sent to the service (and returned in the http.Response).
+func ExtractClientID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderClientID, resp)
+}
+
+// ExtractRequestID extracts the Azure server generated request identifier from the
+// x-ms-request-id header.
+func ExtractRequestID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
+// azure.RequestError by reading the response body unless the response HTTP status code
+// is among the set passed.
+//
+// If there is a chance the service may return responses other than the Azure error
+// format and the response cannot be parsed into an error, a decoding error will
+// be returned containing the response body. In any case, the Responder will
+// return an error if the status code is not satisfied.
+//
+// If this Responder returns an error, the response body will be replaced with
+// an in-memory reader, which needs no further closing.
+func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
+	return func(r autorest.Responder) autorest.Responder {
+		return autorest.ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
+				var e RequestError
+				defer resp.Body.Close()
+
+				encodedAs := autorest.EncodedAsJSON
+				if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
+					encodedAs = autorest.EncodedAsXML
+				}
+
+				// Copy and replace the Body in case it does not contain an error object.
+				// This will leave the Body available to the caller.
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err) + } + + // for example, should the API return the literal value `null` as the response + if e.ServiceError == nil { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + Details: []map[string]interface{}{ + { + "HttpResponse.Body": b.String(), + }, + }, + } + } + } + + if e.ServiceError != nil && e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err) + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000000..9bbc0899e4c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,269 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. 
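+// These IDs are typically used as token audiences when acquiring credentials;
+// for example (illustrative, not taken from the upstream docs):
+//
+//	audience := PublicCloud.ResourceIdentifiers.KeyVault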
+type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` + Synapse string `json:"synapse"` + ServiceBus string `json:"serviceBus"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` + SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + APIManagementHostNameSuffix: "azure-api.net", + SynapseEndpointSuffix: "dev.azuresynapse.net", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: 
"https://servicebus.azure.net/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + APIManagementHostNameSuffix: "azure-api.us", + SynapseEndpointSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + APIManagementHostNameSuffix: "azure-api.cn", + SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + }, + } + + // GermanCloud is the 
cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + APIManagementHostNameSuffix: NotAvailable, + SynapseEndpointSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} + +// SetEnvironment updates the environment map with the specified values. 
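+//
+// A hedged sketch of registering and later resolving a custom cloud; the
+// name and base environment below are illustrative:
+//
+//	stack := PublicCloud
+//	stack.Name = "MyCustomCloud"
+//	SetEnvironment("MYCUSTOMCLOUD", stack)
+//	env, _ := EnvironmentFromName("mycustomcloud")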
+func SetEnvironment(name string, env Environment) { + environments[strings.ToUpper(name)] = env +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 00000000000..507f9e95cf1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... 
+ EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + 
environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000000..c6d39f68665 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries.
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := autorest.NewRetriableRequest(r)
+			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+
+				resp, err = autorest.SendWithSender(s, rr.Request(),
+					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+				)
+				if err != nil {
+					return resp, err
+				}
+
+				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
+					return resp, err
+				}
+
+				var re RequestError
+				if strings.Contains(r.Header.Get("Content-Type"), "xml") {
+					// XML errors (e.g. Storage Data Plane) only return the inner object
+					err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
+				} else {
+					err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
+				}
+
+				if err != nil {
+					return resp, err
+				}
+				err = re
+
+				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+					regErr := register(client, r, re)
+					if regErr != nil {
+						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
+					}
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+	err = autorest.Respond(
+		resp,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&provider),
+		autorest.ByClosing(),
+	)
+	if err != nil {
+		return err
+	}
+
+	// poll for registered provisioning state
+	registrationStartTime := time.Now()
+	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
+		// taken from the resources SDK
+		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+		preparer := autorest.CreatePreparer(
+			autorest.AsGet(),
+			autorest.WithBaseURL(newURL.String()),
+			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+			autorest.WithQueryParameters(queryParameters),
+		)
+		req, err = preparer.Prepare(&http.Request{})
+		if err != nil {
+			return err
+		}
+		req = req.WithContext(originalReq.Context())
+
+		resp, err := autorest.SendWithSender(client, req,
+			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+		)
+		if err != nil {
+			return err
+		}
+
+		err = autorest.Respond(
+			resp,
+			WithErrorUnlessStatusCode(http.StatusOK),
+			autorest.ByUnmarshallingJSON(&provider),
+			autorest.ByClosing(),
+		)
+		if err != nil {
+			return err
+		}
+
+		if provider.RegistrationState != nil &&
+			*provider.RegistrationState == "Registered" {
+			break
+		}
+
+		delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
+		if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
+			return originalReq.Context().Err()
+		}
+	}
+	if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
+		return errors.New("polling for resource provider registration has exceeded the polling duration")
+	}
+	return err
+}
+
+func getSubscription(path string) string {
+	parts := strings.Split(path, "/")
+	for i, v := range parts {
+		if v == "subscriptions" && (i+1) < len(parts) {
+			return parts[i+1]
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
new file mode 100644
index 00000000000..0b7525f0f4c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -0,0 +1,328 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/go-autorest/logger"
+)
+
+const (
+	// DefaultPollingDelay is a reasonable delay between polling requests.
+	DefaultPollingDelay = 60 * time.Second
+
+	// DefaultPollingDuration is a reasonable total polling duration.
+	DefaultPollingDuration = 15 * time.Minute
+
+	// DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+	DefaultRetryAttempts = 3
+
+	// DefaultRetryDuration is the duration to wait between retries.
+	DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+	// StatusCodesForRetry are a defined group of status codes for which the client will retry
+	StatusCodesForRetry = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
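+//
+// Illustrative wiring (assumed usage; the logger destination is a placeholder):
+//
+//	li := LoggingInspector{Logger: log.New(os.Stderr, "", log.LstdFlags)}
+//	c := NewClientWithUserAgent("example")
+//	c.RequestInspector = li.WithInspection()
+//	c.ResponseInspector = li.ByInspecting()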
+func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. + PollingDuration time.Duration + + // RetryAttempts sets the total number of times the client will attempt to make an HTTP request. + // Set the value to 1 to disable retries. DO NOT set the value to less than 1. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool + + // SendDecorators can be used to override the default chain of SendDecorators. + // This can be used to specify things like a custom retry SendDecorator. + // Set this to an empty slice to use no SendDecorators. + SendDecorators []SendDecorator +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. 
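+//
+// For example (illustrative values):
+//
+//	c := NewClientWithOptions(ClientOptions{
+//		UserAgent:     "mymodule/v1.0",
+//		Renegotiation: tls.RenegotiateOnceAsClient,
+//	})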
+func NewClientWithOptions(options ClientOptions) Client {
+	return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   DefaultRetryDuration,
+		UserAgent:       UserAgent(),
+	}
+	c.Sender = c.sender(renegotiation)
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent.
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+	r, err := Prepare(r,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+	logger.Instance.WriteRequest(r, logger.Filter{
+		Header: func(k string, v []string) (bool, []string) {
+			// remove the auth token from the log
+			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+				v = []string{"**REDACTED**"}
+			}
+			return true, v
+		},
+	})
+	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+	if resp == nil && err == nil {
+		err = errors.New("autorest: received nil response and error")
+	}
+	logger.Instance.WriteResponse(resp, logger.Filter{})
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+	if c.Sender == nil {
+		return sender(renegotiation)
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} + +// Send sends the provided http.Request using the client's Sender or the default sender. +// It returns the http.Response and possible error. It also accepts a, possibly empty, +// default set of SendDecorators used when sending the request. +// SendDecorators have the following precedence: +// 1. In a request's context via WithSendDecorators() +// 2. Specified on the client in SendDecorators +// 3. The default values specified in this method +func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) { + if c.SendDecorators != nil { + decorators = c.SendDecorators + } + inCtx := req.Context().Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + decorators = sd + } + return SendWithSender(c, req, decorators...) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 00000000000..c4571065685
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types. And both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	fullDate     = "2006-01-02"
+	fullDateJSON = `"2006-01-02"`
+	dateFormat   = "%04d-%02d-%02d"
+	jsonFormat   = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+	time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
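+//
+// A minimal usage sketch (the value is illustrative):
+//
+//	d, err := date.ParseDate("2001-02-03")
+//	if err == nil {
+//		fmt.Println(d) // prints 2001-02-03
+//	}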
+func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 00000000000..f88ecc4022d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 00000000000..1fc56a962ee --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 00000000000..4e054320717 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000000..b453fad0491 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). 
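+//
+// A minimal sketch of the round trip (the timestamp is illustrative):
+//
+//	var t date.Time
+//	_ = t.UnmarshalJSON([]byte(`"2001-02-03T04:05:06Z"`))
+//	b, _ := t.MarshalText() // 2001-02-03T04:05:06Z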
+func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000000..48fb39ba9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
+func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
+	t.Time, err = ParseTime(rfc1123, string(data))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
+	return t.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
+	return t.UnmarshalText(data)
+}
+
+// ToTime returns a Time as a time.Time
+func (t TimeRFC1123) ToTime() time.Time {
+	return t.Time
+}
+
+// String returns the Time formatted as an RFC1123 date-time string (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) String() string {
+	// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
+	b, err := t.MarshalText()
+	if err != nil {
+		return ""
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
new file mode 100644
index 00000000000..7073959b2a9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
@@ -0,0 +1,123 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+	return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
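+//
+// A minimal sketch (the value is illustrative):
+//
+//	t := date.NewUnixTimeFromSeconds(1.5)
+//	b, _ := json.Marshal(t) // 1.5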
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText stores the UnixTime as a textual RFC 3339 timestamp (via time.Time.MarshalText).
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from a value stored textually as an RFC 3339 timestamp
+// (via time.Time.UnmarshalText).
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 00000000000..12addf0ebb4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// ParseTime parses the given time string using the specified format.
+func ParseTime(format string, t string) (d time.Time, err error) {
+	return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 00000000000..35098eda8e7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,103 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UndefinedStatusCode is used when HTTP status code is not available for an error.
+	UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+	Original error
+
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+	StatusCode interface{}
+
+	// Message is the error message.
+	Message string
+
+	// ServiceError is the response body of the failed API, in bytes.
+	ServiceError []byte
+
+	// Response is the response object that was returned during failure if applicable.
+	Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and the original error (if any)).
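+//
+// A sketch of the resulting format, with hypothetical values:
+//
+//	autorest#Send: request failed: StatusCode=500 -- Original Error: context deadline exceeded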
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} + +// Unwrap returns the original error. +func (e DetailedError) Unwrap() error { + return e.Original +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 00000000000..fd0b2c0c327 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.13 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/logger v0.2.1 + github.com/Azure/go-autorest/tracing v0.6.0 + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 00000000000..373d9c4e255 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,23 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 00000000000..da65e1041eb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 00000000000..98574a4155f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,547 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. +func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. 
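+//
+// A minimal sketch of storing and retrieving decorators via context:
+//
+//	ctx := WithPrepareDecorators(context.Background(), []PrepareDecorator{AsGet()})
+//	pd := GetPrepareDecorators(ctx, AsPost()) // returns the stored decorators ([AsGet()])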
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+	inCtx := ctx.Value(ctxPrepareDecorators{})
+	if pd, ok := inCtx.([]PrepareDecorator); ok {
+		return pd
+	}
+	return defaultPrepareDecorators
+}
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must not share or hold per-invocation state, since Preparers may be shared and re-used.
+type Preparer interface {
+	Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+	return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along and then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+	return DecoratePreparer(
+		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+		decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+	for _, decorate := range decorators {
+		p = decorate(p)
+	}
+	return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators, which it then applies to the passed http.Request.
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+	if r == nil {
+		return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+	}
+	return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			return p.Prepare(r)
+		})
+	}
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
+func WithHeader(header string, value string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				setHeader(r, http.CanonicalHeaderKey(header), value)
+			}
+			return r, err
+		})
+	}
+}
+
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
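+//
+// A minimal sketch (the header values are hypothetical):
+//
+//	p := CreatePreparer(WithHeaders(map[string]interface{}{
+//		"Accept":       "application/json",
+//		"x-request-id": "1234",
+//	}))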
+func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. Query parameters will be encoded as required. 
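+//
+// A minimal sketch of composing decorators (the URL and path are hypothetical):
+//
+//	req, err := Prepare(&http.Request{},
+//		AsGet(),
+//		WithBaseURL("https://example.com"),
+//		WithPath("items"))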
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if u.RawQuery != "" {
+					q, err := url.ParseQuery(u.RawQuery)
+					if err != nil {
+						return r, err
+					}
+					u.RawQuery = q.Encode()
+				}
+				r.URL = u
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBytes returns a PrepareDecorator that takes a pointer to a byte slice and
+// passes the bytes directly to the request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the given url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the supplied form parameters
+// as multipart/form-data into the http.Request body.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var body bytes.Buffer
+				writer := multipart.NewWriter(&body)
+				for key, value := range formDataParameters {
+					if rc, ok := value.(io.ReadCloser); ok {
+						var fd io.Writer
+						if fd, err = writer.CreateFormFile(key, key); err != nil {
+							return r, err
+						}
+						if _, err = io.Copy(fd, rc); err != nil {
+							return r, err
+						}
+					} else {
+						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+							return r, err
+						}
+					}
+				}
+				if err = writer.Close(); err != nil {
+					return r, err
+				}
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+				r.ContentLength = int64(body.Len())
+				return r, err
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFile returns a PrepareDecorator that sends a file in the request body.
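+//
+// A minimal sketch (the file name and URL are hypothetical):
+//
+//	f, _ := os.Open("payload.bin")
+//	req, _ := Prepare(&http.Request{},
+//		AsPut(),
+//		WithBaseURL("https://example.com"),
+//		WithFile(f))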
+func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader))) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. 
+func WithPath(path string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := escapeValueStrings(ensureValueStrings(pathParameters))
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(pathParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+	p := strings.TrimRight(u.String(), "/")
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
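+//
+// A minimal sketch (the URL and parameter are hypothetical):
+//
+//	req, _ := Prepare(&http.Request{},
+//		WithBaseURL("https://example.com"),
+//		WithQueryParameters(map[string]interface{}{"api-version": "2017-05-10"}))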
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+	parameters := MapToValues(queryParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+				}
+				v := r.URL.Query()
+				for key, value := range parameters {
+					for i := range value {
+						d, err := url.QueryUnescape(value[i])
+						if err != nil {
+							return r, err
+						}
+						value[i] = d
+					}
+					v[key] = value
+				}
+				r.URL.RawQuery = v.Encode()
+			}
+			return r, err
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 00000000000..349e1963a2c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold state,
+// since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along and then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share, whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
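+//
+// A minimal sketch, assuming resp is an *http.Response carrying a JSON body:
+//
+//	var v map[string]interface{}
+//	err := Respond(resp,
+//		WithErrorUnlessStatusCode(http.StatusOK),
+//		ByUnmarshallingJSON(&v),
+//		ByClosing())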
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators, which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder, after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder, after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder, after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer and
+// presented in the returned error, as well as in the response body.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
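+//
+// A minimal sketch (the header name is illustrative):
+//
+//	vals := ExtractHeader("Retry-After", resp) // nil when resp is nil or the header is absent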
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 00000000000..fa11dbed79b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 00000000000..7143cc61b58
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 00000000000..ae15c6bf962
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 00000000000..78610ef2044
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,447 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"net/http/cookiejar"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums
+const defaultSendersCount = 3
+
+type defaultSender struct {
+	sender Sender
+	init   *sync.Once
+}
+
+// each type of sender will be created on demand in sender()
+var defaultSenders [defaultSendersCount]defaultSender
+
+func init() {
+	for i := 0; i < defaultSendersCount; i++ {
+		defaultSenders[i].init = &sync.Once{}
+	}
+}
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
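+//
+// A minimal usage sketch (editorial example, not part of the upstream go-autorest
+// sources); "req" is assumed to be a previously prepared *http.Request:
+//
+//	resp, err := autorest.Send(req,
+//		autorest.DoErrorUnlessStatusCode(http.StatusOK),
+//		autorest.DoCloseIfError())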
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+	// note that we can't init defaultSenders in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenders[renegotiation].init.Do(func() {
+		// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+		defaultTransport := http.DefaultTransport.(*http.Transport)
+		transport := &http.Transport{
+			Proxy:                 defaultTransport.Proxy,
+			DialContext:           defaultTransport.DialContext,
+			MaxIdleConns:          defaultTransport.MaxIdleConns,
+			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+			TLSClientConfig: &tls.Config{
+				MinVersion:    tls.VersionTLS12,
+				Renegotiation: renegotiation,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSenders[renegotiation].sender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSenders[renegotiation].sender
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by closing the optional channel on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Context().Done()) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByDiscardingBody(), ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
+// passed status codes. It expects the http.Response to contain a Location header providing the
+// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
+// the supplied duration. It will delay between requests for the duration specified in the
+// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
+// closing the optional channel on the http.Request.
+func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				r, err = NewPollingRequestWithContext(r.Context(), resp)
+
+				for err == nil && ResponseHasStatusCode(resp, codes...) {
+					Respond(resp,
+						ByDiscardingBody(),
+						ByClosing())
+					resp, err = SendWithSender(s, r,
+						AfterDelay(GetRetryAfter(resp, delay)))
+				}
+			}
+
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
+// the http.Request.
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			for attempt := 0; attempt < attempts; attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
+var Count429AsRetry = true
+
+// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
+var Max429Delay time.Duration
+
+// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by canceling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
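+//
+// A minimal usage sketch (editorial example, not part of the upstream go-autorest
+// sources); "req" is assumed to be a previously prepared *http.Request, and
+// http.DefaultClient satisfies the Sender interface:
+//
+//	resp, err := autorest.SendWithSender(http.DefaultClient, req,
+//		autorest.DoRetryForStatusCodes(3, time.Second,
+//			http.StatusInternalServerError, http.StatusBadGateway))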
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...)
+		})
+	}
+}
+
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by canceling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...)
+		})
+	}
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+	rr := NewRetriableRequest(r)
+	// Increment to add the first call (attempts denotes number of retries)
+	for attempt, delayCount := 0, 0; attempt < attempts+1; {
+		err = rr.Prepare()
+		if err != nil {
+			return
+		}
+		DrainResponseBody(resp)
+		resp, err = s.Do(rr.Request())
+		// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+		// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+		if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+			return resp, err
+		}
+		delayed := DelayWithRetryAfter(resp, r.Context().Done())
+		// if this was a 429 set the delay cap as specified.
+		// applicable only in the absence of a retry-after header.
+		if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
+			cap = Max429Delay
+		}
+		if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) {
+			return resp, r.Context().Err()
+		}
+		// when count429 == false don't count a 429 against the number
+		// of attempts so that we continue to retry until it succeeds
+		if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+			attempt++
+		}
+		// delay count is tracked separately from attempts to
+		// ensure that 429 participates in exponential back-off
+		delayCount++
+	}
+	return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is canceled the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
+// optional channel on the http.Request.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be
+// set to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
+// returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be
+// set to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
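+//
+// For example (editorial note, not part of the upstream go-autorest sources),
+// a 2s backoff with a 10s cap waits 2s, 4s, 8s, 10s, 10s, ... for attempts
+// 0, 1, 2, 3, 4, ...; "attempt" and "ctx" are assumed to exist at the call site:
+//
+//	ok := autorest.DelayForBackoffWithCap(2*time.Second, 10*time.Second, attempt, ctx.Done())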
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } + select { + case <-time.After(d): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 00000000000..3467b8fa604 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,232 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. 
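+//
+// A minimal usage sketch (editorial example, not part of the upstream go-autorest
+// sources); it mirrors what ByCopying does with a response body:
+//
+//	var buf bytes.Buffer
+//	resp.Body = autorest.TeeReadCloser(resp.Body, &buf)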
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// AsStringSlice method converts interface{} to []string. +// s must be of type slice or array or an error is returned. +// Each element of s will be converted to its string representation. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) + } + return stringSlice, nil +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. +func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. 
+func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false +// if it's not. If the error doesn't implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// DrainResponseBody reads the response body then closes it. +func DrainResponseBody(resp *http.Response) error { + if resp != nil && resp.Body != nil { + _, err := io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return err + } + return nil +} + +func setHeader(r *http.Request, key, value string) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(key, value) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go new file mode 100644 index 00000000000..4cb5e6849f6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go @@ -0,0 +1,29 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "errors" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. +func IsTokenRefreshError(err error) bool { + var tre adal.TokenRefreshError + return errors.As(err, &tre) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go new file mode 100644 index 00000000000..ebb51b4f531 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go @@ -0,0 +1,31 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import "github.com/Azure/go-autorest/autorest/adal" + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. 
+func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 00000000000..713e23581d9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v14.2.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 00000000000..6fb8404fd01 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 
2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 00000000000..99ae6ca988a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 00000000000..bedeaee039e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum new file mode 100644 index 00000000000..1fc56a962ee --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 00000000000..0aa27680db9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 00000000000..2f5d8cc1a19 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,337 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. 
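+//
+// For example (editorial note, not part of the upstream go-autorest sources),
+// a writer configured at LogError records LogError, LogPanic, and LogFatal
+// entries and ignores LogWarning, LogInfo, and LogDebug entries, matching the
+// comparison used by fileLogger:
+//
+//	if configuredLevel >= entryLevel { /* write the entry */ }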
+type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug + + // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response. + // NOTE: this can disclose sensitive information, use with care. + LogAuth +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logAuth = "AUTH" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + case logAuth: + lt = LogAuth + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + case LogAuth: + return logAuth + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. 
The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. +func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. 
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
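As context for reviewers, the logger package vendored above is typically driven as in the following minimal sketch. This is illustration only, not part of the diff: the import path is the package's conventional vendored path, and the redacting filter is an assumption chosen to show the Filter hooks.

```go
package main

import (
	"net/http"
	"net/url"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	// The package configures itself in init() from AZURE_GO_SDK_LOG_LEVEL and
	// (optionally) AZURE_GO_SDK_LOG_FILE, so export those before the process
	// starts, e.g. AZURE_GO_SDK_LOG_LEVEL=INFO.
	req, err := http.NewRequest(http.MethodGet, "https://example.com/api?code=secret", nil)
	if err != nil {
		panic(err)
	}

	// An illustrative filter that hides the query string and drops the
	// Authorization header. The zero-value Filter logs everything unmodified.
	filter := logger.Filter{
		URL: func(u *url.URL) string {
			c := *u // shallow copy so the request's URL is left untouched
			c.RawQuery = "REDACTED"
			return c.String()
		},
		Header: func(key string, val []string) (bool, []string) {
			return key != "Authorization", val
		},
	}

	// No-op when no level is configured (Instance stays a nilLogger);
	// otherwise writes the request line, headers, and (at DEBUG) the body.
	logger.Instance.WriteRequest(req, filter)
}
```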
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 00000000000..a2cdec78c81 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 00000000000..1fc56a962ee --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 00000000000..e163975cd4e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 00000000000..0e7a6e96254 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. 
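+// The package-level helpers below are safe to call either way; they degrade to no-ops when no Tracer is registered.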
+func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. +func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/AUTHORS b/vendor/github.com/GoogleCloudPlatform/testgrid/AUTHORS new file mode 100644 index 00000000000..5322d6de22e --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/AUTHORS @@ -0,0 +1,6 @@ +# This is the list of TestGrid authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google LLC diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/LICENSE b/vendor/github.com/GoogleCloudPlatform/testgrid/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/metadata/job.go b/vendor/github.com/GoogleCloudPlatform/testgrid/metadata/job.go new file mode 100644 index 00000000000..c3f53a3ecd9 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/metadata/job.go @@ -0,0 +1,188 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "strings" +) + +// Started holds the started.json values of the build. 
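+// It is the counterpart of Finished below, which holds the finished.json values recorded when the build completes.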
+type Started struct {
+	// Timestamp is UTC epoch seconds when the job started.
+	Timestamp int64 `json:"timestamp"` // epoch seconds
+	// Node holds the name of the machine that ran the job.
+	Node string `json:"node,omitempty"`
+
+	// Consider whether to keep the following:
+
+	// Pull holds the PR number the primary repo is testing
+	Pull string `json:"pull,omitempty"`
+	// Repos holds the RepoVersion of all commits checked out.
+	Repos      map[string]string `json:"repos,omitempty"` // {repo: branch_or_pull} map
+	RepoCommit string            `json:"repo-commit,omitempty"`
+
+	// Deprecated fields:
+
+	// Metadata is deprecated, add to finished.json
+	Metadata Metadata `json:"metadata,omitempty"` // TODO(fejta): remove
+
+	// Use RepoCommit
+	DeprecatedJobVersion  string `json:"job-version,omitempty"`  // TODO(fejta): remove
+	DeprecatedRepoVersion string `json:"repo-version,omitempty"` // TODO(fejta): remove
+
+}
+
+const (
+	// JobVersion is the metadata key that overrides repo-commit in Started when set.
+	JobVersion = "job-version"
+)
+
+// Finished holds the finished.json values of the build
+type Finished struct {
+	// Timestamp is UTC epoch seconds when the job finished.
+	// An empty value indicates an incomplete job.
+	Timestamp *int64 `json:"timestamp,omitempty"`
+	// Passed is true when the job completes successfully.
+	Passed *bool `json:"passed"`
+	// Metadata holds data computed by the job at runtime.
+	// For example, the version of a binary downloaded at runtime
+	// The JobVersion key overrides the auto-version set in Started.
+	Metadata Metadata `json:"metadata,omitempty"`
+
+	// Consider whether to keep the following:
+
+	// Deprecated fields:
+
+	// Result is deprecated, use Passed.
+	Result string `json:"result,omitempty"` // TODO(fejta): remove
+
+	// Use Metadata[JobVersion] or Started.RepoCommit
+	DeprecatedJobVersion  string `json:"job-version,omitempty"`  // TODO(fejta): remove
+	DeprecatedRevision    string `json:"revision,omitempty"`     // TODO(fejta): remove
+	DeprecatedRepoVersion string `json:"repo-version,omitempty"` // TODO(fejta): remove
+}
+
+// Metadata holds the finished.json values in the metadata key.
+//
+// Metadata values can be either a string or a string map of strings
+//
+// TODO(fejta): figure out which of these we want and document them
+// Special values: infra-commit, repos, repo, repo-commit, links, others
+type Metadata map[string]interface{}
+
+// String returns the name key if its value is a string, and true if the key is present.
+func (m Metadata) String(name string) (*string, bool) {
+	if v, ok := m[name]; !ok {
+		return nil, false
+	} else if t, good := v.(string); !good {
+		return nil, true
+	} else {
+		return &t, true
+	}
+}
+
+// Meta returns the name key if its value is a child object, and true if the key is present.
+func (m Metadata) Meta(name string) (*Metadata, bool) {
+	if v, ok := m[name]; !ok {
+		return nil, false
+	} else if t, good := v.(Metadata); good {
+		return &t, true
+	} else if t, good := v.(map[string]interface{}); good {
+		child := Metadata(t)
+		return &child, true
+	}
+	return nil, true
+}
+
+// Keys returns an array of the keys of all valid Metadata values.
+func (m Metadata) Keys() []string {
+	ka := make([]string, 0, len(m))
+	for k := range m {
+		if _, ok := m.Meta(k); ok {
+			ka = append(ka, k)
+		}
+	}
+	return ka
+}
+
+// Strings returns the submap of values in the map that are strings.
+func (m Metadata) Strings() map[string]string { + bm := map[string]string{} + for k, v := range m { + if s, ok := v.(string); ok { + bm[k] = s + } + // TODO(fejta): handle sub items + } + return bm +} + +// firstFilled returns the first non-empty option or else def. +func firstFilled(def string, options ...string) string { + for _, o := range options { + if o != "" { + return o + } + } + return def +} + +const Missing = "missing" + +// Version extracts the job's custom version or else the checked out repo commit. +func Version(started Started, finished Finished) string { + // TODO(fejta): started.RepoCommit, finished.Metadata.String(JobVersion) + meta := func(key string) string { + if finished.Metadata == nil { + return "" + } + v, ok := finished.Metadata.String(key) + if !ok { + return "" + } + return *v + } + + val := firstFilled( + Missing, + finished.DeprecatedJobVersion, started.DeprecatedJobVersion, + started.DeprecatedRepoVersion, finished.DeprecatedRepoVersion, + meta("revision"), meta("repo-commit"), + meta(JobVersion), started.RepoCommit, // TODO(fejta): remove others + ) + parts := strings.SplitN(val, "+", 2) + val = parts[len(parts)-1] + if n := len(val); n > 9 { + return val[:9] + } + return val +} + +// SetVersion ensures that the repoCommit and jobVersion are set appropriately. +func SetVersion(started *Started, finished *Finished, repoCommit, jobVersion string) { + if started != nil && repoCommit != "" { + started.DeprecatedRepoVersion = repoCommit // TODO(fejta): pump this + started.RepoCommit = repoCommit + } + if finished != nil && jobVersion != "" { + if finished.Metadata == nil { + finished.Metadata = Metadata{} + } + finished.Metadata["job-version"] = jobVersion + finished.DeprecatedJobVersion = jobVersion + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/metadata/junit/junit.go b/vendor/github.com/GoogleCloudPlatform/testgrid/metadata/junit/junit.go new file mode 100644 index 00000000000..62d1e47f0a4 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/metadata/junit/junit.go @@ -0,0 +1,215 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package junit describes the test-infra definition of "junit", and provides +// utilities to parse it. +package junit + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +type suiteOrSuites struct { + suites Suites +} + +func (s *suiteOrSuites) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + switch start.Name.Local { + case "testsuites": + d.DecodeElement(&s.suites, &start) + case "testsuite": + var suite Suite + d.DecodeElement(&suite, &start) + s.suites.Suites = append(s.suites.Suites, suite) + default: + return fmt.Errorf("bad element name: %q", start.Name) + } + s.suites.Truncate(10000) + return nil +} + +// Suites holds a list of Suite results +type Suites struct { + XMLName xml.Name `xml:"testsuites"` + Suites []Suite `xml:"testsuite"` +} + +// Truncate ensures that strings do not exceed the specified length. 
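+// It recurses into each nested suite and, through Suite.Truncate, into every result.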
+func (s *Suites) Truncate(max int) {
+	for i := range s.Suites {
+		s.Suites[i].Truncate(max)
+	}
+}
+
+// Suite holds results
+type Suite struct {
+	XMLName  xml.Name `xml:"testsuite"`
+	Suites   []Suite  `xml:"testsuite"`
+	Name     string   `xml:"name,attr"`
+	Time     float64  `xml:"time,attr"` // Seconds
+	Failures int      `xml:"failures,attr"`
+	Tests    int      `xml:"tests,attr"`
+	Results  []Result `xml:"testcase"`
+}
+
+// Truncate ensures that strings do not exceed the specified length.
+func (s *Suite) Truncate(max int) {
+	for i := range s.Suites {
+		s.Suites[i].Truncate(max)
+	}
+	for i := range s.Results {
+		s.Results[i].Truncate(max)
+	}
+}
+
+// Property defines the xml element that stores additional metrics about each benchmark.
+type Property struct {
+	Name  string `xml:"name,attr"`
+	Value string `xml:"value,attr"`
+}
+
+// Properties defines the xml element that stores the list of properties that are associated with one benchmark.
+type Properties struct {
+	PropertyList []Property `xml:"property"`
+}
+
+// Result holds results
+type Result struct {
+	Name       string      `xml:"name,attr"`
+	Time       float64     `xml:"time,attr"`
+	ClassName  string      `xml:"classname,attr"`
+	Failure    *string     `xml:"failure,omitempty"`
+	Output     *string     `xml:"system-out,omitempty"`
+	Error      *string     `xml:"system-err,omitempty"`
+	Errored    *string     `xml:"error,omitempty"`
+	Skipped    *string     `xml:"skipped,omitempty"`
+	Properties *Properties `xml:"properties,omitempty"`
+}
+
+// SetProperty adds the specified property to the Result or replaces the
+// existing value if a property with that name already exists.
+func (r *Result) SetProperty(name, value string) {
+	if r.Properties == nil {
+		r.Properties = &Properties{}
+	}
+	for i, existing := range r.Properties.PropertyList {
+		if existing.Name == name {
+			r.Properties.PropertyList[i].Value = value
+			return
+		}
+	}
+	// Didn't find an existing property. Add a new one.
+	r.Properties.PropertyList = append(
+		r.Properties.PropertyList,
+		Property{
+			Name:  name,
+			Value: value,
+		},
+	)
+}
+
+// Message extracts the message for the junit test case.
+//
+// Will use the first non-empty <error>, <failure>, <skipped>, <system-err>, <system-out> value.
+func (r Result) Message(max int) string {
+	var msg string
+	switch {
+	case r.Errored != nil && *r.Errored != "":
+		msg = *r.Errored
+	case r.Failure != nil && *r.Failure != "":
+		msg = *r.Failure
+	case r.Skipped != nil && *r.Skipped != "":
+		msg = *r.Skipped
+	case r.Error != nil && *r.Error != "":
+		msg = *r.Error
+	case r.Output != nil && *r.Output != "":
+		msg = *r.Output
+	}
+	msg = truncate(msg, max)
+	if utf8.ValidString(msg) {
+		return msg
+	}
+	return fmt.Sprintf("invalid utf8: %s", strings.ToValidUTF8(msg, "?"))
+}
+
+func truncate(s string, max int) string {
+	if max <= 0 {
+		return s
+	}
+	l := len(s)
+	if l < max {
+		return s
+	}
+	h := max / 2
+	return s[:h] + "..." + s[l-h:]
+}
+
+func truncatePointer(str *string, max int) {
+	if str == nil {
+		return
+	}
+	// assign through the pointer so the caller sees the truncated value
+	*str = truncate(*str, max)
+}
+
+// Truncate ensures that strings do not exceed the specified length.
+func (r Result) Truncate(max int) {
+	for _, s := range []*string{r.Errored, r.Failure, r.Skipped, r.Error, r.Output} {
+		truncatePointer(s, max)
+	}
+}
+
+func unmarshalXML(reader io.Reader, i interface{}) error {
+	dec := xml.NewDecoder(reader)
+	dec.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+		switch charset {
+		case "UTF-8", "utf8", "":
+			// utf8 is not recognized by golang, but our coalesce.py writes a utf8 doc, which python accepts.
+			return input, nil
+		default:
+			return nil, fmt.Errorf("unknown charset: %s", charset)
+		}
+	}
+	return dec.Decode(i)
+}
+
+// Parse returns the Suites representation of these XML bytes.
+func Parse(buf []byte) (*Suites, error) {
+	if len(buf) == 0 {
+		return &Suites{}, nil
+	}
+	reader := bytes.NewReader(buf)
+	return ParseStream(reader)
+}
+
+// ParseStream reads bytes into a Suites object.
+func ParseStream(reader io.Reader) (*Suites, error) {
+	// Try to parse it as a <testsuites> object
+	var s suiteOrSuites
+	err := unmarshalXML(reader, &s)
+	if err != nil && err != io.EOF {
+		return nil, err
+	}
+	return &s.suites, nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/config/config.pb.go b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/config/config.pb.go
new file mode 100644
index 00000000000..ca7d7255ac1
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/config/config.pb.go
@@ -0,0 +1,2723 @@
+/*
+Copyright The TestGrid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: config.proto
+
+package config
+
+import (
+	fmt "fmt"
+	custom_evaluator "github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type TestGroup_TestsName int32
+
+const (
+	TestGroup_TESTS_NAME_UNSPECIFIED TestGroup_TestsName = 0
+	TestGroup_TESTS_NAME_IGNORE      TestGroup_TestsName = 1
+	TestGroup_TESTS_NAME_REPLACE     TestGroup_TestsName = 2
+	TestGroup_TESTS_NAME_APPEND      TestGroup_TestsName = 3
+)
+
+var TestGroup_TestsName_name = map[int32]string{
+	0: "TESTS_NAME_UNSPECIFIED",
+	1: "TESTS_NAME_IGNORE",
+	2: "TESTS_NAME_REPLACE",
+	3: "TESTS_NAME_APPEND",
+}
+
+var TestGroup_TestsName_value = map[string]int32{
+	"TESTS_NAME_UNSPECIFIED": 0,
+	"TESTS_NAME_IGNORE":      1,
+	"TESTS_NAME_REPLACE":     2,
+	"TESTS_NAME_APPEND":      3,
+}
+
+func (x TestGroup_TestsName) String() string {
+	return proto.EnumName(TestGroup_TestsName_name, int32(x))
+}
+
+func (TestGroup_TestsName) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 0}
+}
+
+type TestGroup_FallbackGrouping int32
+
+const (
+	TestGroup_FALLBACK_GROUPING_NONE   TestGroup_FallbackGrouping = 0
+	TestGroup_FALLBACK_GROUPING_DATE   TestGroup_FallbackGrouping = 1
+	TestGroup_FALLBACK_GROUPING_LABELS TestGroup_FallbackGrouping = 2
+	TestGroup_FALLBACK_GROUPING_ID     TestGroup_FallbackGrouping = 3
+	TestGroup_FALLBACK_GROUPING_BUILD  TestGroup_FallbackGrouping = 4
+	// When using this, ensure fallback_grouping_configuration_value is
+	// also set.
+ TestGroup_FALLBACK_GROUPING_CONFIGURATION_VALUE TestGroup_FallbackGrouping = 5 +) + +var TestGroup_FallbackGrouping_name = map[int32]string{ + 0: "FALLBACK_GROUPING_NONE", + 1: "FALLBACK_GROUPING_DATE", + 2: "FALLBACK_GROUPING_LABELS", + 3: "FALLBACK_GROUPING_ID", + 4: "FALLBACK_GROUPING_BUILD", + 5: "FALLBACK_GROUPING_CONFIGURATION_VALUE", +} + +var TestGroup_FallbackGrouping_value = map[string]int32{ + "FALLBACK_GROUPING_NONE": 0, + "FALLBACK_GROUPING_DATE": 1, + "FALLBACK_GROUPING_LABELS": 2, + "FALLBACK_GROUPING_ID": 3, + "FALLBACK_GROUPING_BUILD": 4, + "FALLBACK_GROUPING_CONFIGURATION_VALUE": 5, +} + +func (x TestGroup_FallbackGrouping) String() string { + return proto.EnumName(TestGroup_FallbackGrouping_name, int32(x)) +} + +func (TestGroup_FallbackGrouping) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 1} +} + +type TestGroup_PrimaryGrouping int32 + +const ( + TestGroup_PRIMARY_GROUPING_NONE TestGroup_PrimaryGrouping = 0 + TestGroup_PRIMARY_GROUPING_BUILD TestGroup_PrimaryGrouping = 1 +) + +var TestGroup_PrimaryGrouping_name = map[int32]string{ + 0: "PRIMARY_GROUPING_NONE", + 1: "PRIMARY_GROUPING_BUILD", +} + +var TestGroup_PrimaryGrouping_value = map[string]int32{ + "PRIMARY_GROUPING_NONE": 0, + "PRIMARY_GROUPING_BUILD": 1, +} + +func (x TestGroup_PrimaryGrouping) String() string { + return proto.EnumName(TestGroup_PrimaryGrouping_name, int32(x)) +} + +func (TestGroup_PrimaryGrouping) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 2} +} + +// Scale of issue priority, used to indicate importance of issue. +type AutoBugOptions_Priority int32 + +const ( + // Unspecified; may not set priority at all + AutoBugOptions_PRIORITY_UNSPECIFIED AutoBugOptions_Priority = 0 + // See https://developers.google.com/issue-tracker/concepts/issues + AutoBugOptions_P0 AutoBugOptions_Priority = 1 + AutoBugOptions_P1 AutoBugOptions_Priority = 2 + AutoBugOptions_P2 AutoBugOptions_Priority = 3 + AutoBugOptions_P3 AutoBugOptions_Priority = 4 + AutoBugOptions_P4 AutoBugOptions_Priority = 5 +) + +var AutoBugOptions_Priority_name = map[int32]string{ + 0: "PRIORITY_UNSPECIFIED", + 1: "P0", + 2: "P1", + 3: "P2", + 4: "P3", + 5: "P4", +} + +var AutoBugOptions_Priority_value = map[string]int32{ + "PRIORITY_UNSPECIFIED": 0, + "P0": 1, + "P1": 2, + "P2": 3, + "P3": 4, + "P4": 5, +} + +func (x AutoBugOptions_Priority) String() string { + return proto.EnumName(AutoBugOptions_Priority_name, int32(x)) +} + +func (AutoBugOptions_Priority) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{5, 0} +} + +// Specifies the test name, and its source +type TestNameConfig struct { + // The name elements specifying the target test name for this tab. + NameElements []*TestNameConfig_NameElement `protobuf:"bytes,1,rep,name=name_elements,json=nameElements,proto3" json:"name_elements,omitempty"` + // Specifies a printf-style format string for name elements. The format + // string should have as many conversions as there are name_elements. + // For example, two name_elements could be used with name_format="%s: %s". 
+	NameFormat           string   `protobuf:"bytes,2,opt,name=name_format,json=nameFormat,proto3" json:"name_format,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TestNameConfig) Reset()         { *m = TestNameConfig{} }
+func (m *TestNameConfig) String() string { return proto.CompactTextString(m) }
+func (*TestNameConfig) ProtoMessage()    {}
+func (*TestNameConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3eaf2c85e69e9ea4, []int{0}
+}
+
+func (m *TestNameConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TestNameConfig.Unmarshal(m, b)
+}
+func (m *TestNameConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TestNameConfig.Marshal(b, m, deterministic)
+}
+func (m *TestNameConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TestNameConfig.Merge(m, src)
+}
+func (m *TestNameConfig) XXX_Size() int {
+	return xxx_messageInfo_TestNameConfig.Size(m)
+}
+func (m *TestNameConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_TestNameConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TestNameConfig proto.InternalMessageInfo
+
+func (m *TestNameConfig) GetNameElements() []*TestNameConfig_NameElement {
+	if m != nil {
+		return m.NameElements
+	}
+	return nil
+}
+
+func (m *TestNameConfig) GetNameFormat() string {
+	if m != nil {
+		return m.NameFormat
+	}
+	return ""
+}
+
+// Specifies name elements to be selected from configuration values
+type TestNameConfig_NameElement struct {
+	// A space-delimited string of labels
+	Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels,omitempty"`
+	// Configuration value to use.
+	// Valid choices are:
+	//   'Tests name': The name of a test case
+	//   'Commit': The commit number of the build
+	//   'Context', 'Thread': The info extracted from each junit file:
+	//     - junit_core-os_01.xml -> Context: core-os, Thread: 01
+	//     - junit_runner.xml -> Context: runner
+	//     - junit_01.xml -> Thread: 01
+	// or any metadata key from finished.json, which is copied from your test suite.
+	//
+	// A valid sample TestNameConfig looks like:
+	// test_name_config:
+	//   name_elements:
+	//   - target_config: Tests name
+	//   - target_config: Context
+	//   name_format: '%s [%s]'
+	TargetConfig string `protobuf:"bytes,2,opt,name=target_config,json=targetConfig,proto3" json:"target_config,omitempty"`
+	// Whether to use the build-target name
+	BuildTarget bool `protobuf:"varint,3,opt,name=build_target,json=buildTarget,proto3" json:"build_target,omitempty"`
+	// A space-delimited string of Bazel build tags.
+	Tags string `protobuf:"bytes,4,opt,name=tags,proto3" json:"tags,omitempty"`
+	// The key of a test result's property.
+ TestProperty string `protobuf:"bytes,5,opt,name=test_property,json=testProperty,proto3" json:"test_property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestNameConfig_NameElement) Reset() { *m = TestNameConfig_NameElement{} } +func (m *TestNameConfig_NameElement) String() string { return proto.CompactTextString(m) } +func (*TestNameConfig_NameElement) ProtoMessage() {} +func (*TestNameConfig_NameElement) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{0, 0} +} + +func (m *TestNameConfig_NameElement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestNameConfig_NameElement.Unmarshal(m, b) +} +func (m *TestNameConfig_NameElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestNameConfig_NameElement.Marshal(b, m, deterministic) +} +func (m *TestNameConfig_NameElement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestNameConfig_NameElement.Merge(m, src) +} +func (m *TestNameConfig_NameElement) XXX_Size() int { + return xxx_messageInfo_TestNameConfig_NameElement.Size(m) +} +func (m *TestNameConfig_NameElement) XXX_DiscardUnknown() { + xxx_messageInfo_TestNameConfig_NameElement.DiscardUnknown(m) +} + +var xxx_messageInfo_TestNameConfig_NameElement proto.InternalMessageInfo + +func (m *TestNameConfig_NameElement) GetLabels() string { + if m != nil { + return m.Labels + } + return "" +} + +func (m *TestNameConfig_NameElement) GetTargetConfig() string { + if m != nil { + return m.TargetConfig + } + return "" +} + +func (m *TestNameConfig_NameElement) GetBuildTarget() bool { + if m != nil { + return m.BuildTarget + } + return false +} + +func (m *TestNameConfig_NameElement) GetTags() string { + if m != nil { + return m.Tags + } + return "" +} + +func (m *TestNameConfig_NameElement) GetTestProperty() string { + if m != nil { + return m.TestProperty + } + return "" +} + +// A single notification. +type Notification struct { + // Required: Text summary of the issue or notice. + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + // Optional: Link to further information, such as a bug, email, document, etc. 
+ ContextLink string `protobuf:"bytes,2,opt,name=context_link,json=contextLink,proto3" json:"context_link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Notification) Reset() { *m = Notification{} } +func (m *Notification) String() string { return proto.CompactTextString(m) } +func (*Notification) ProtoMessage() {} +func (*Notification) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{1} +} + +func (m *Notification) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Notification.Unmarshal(m, b) +} +func (m *Notification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Notification.Marshal(b, m, deterministic) +} +func (m *Notification) XXX_Merge(src proto.Message) { + xxx_messageInfo_Notification.Merge(m, src) +} +func (m *Notification) XXX_Size() int { + return xxx_messageInfo_Notification.Size(m) +} +func (m *Notification) XXX_DiscardUnknown() { + xxx_messageInfo_Notification.DiscardUnknown(m) +} + +var xxx_messageInfo_Notification proto.InternalMessageInfo + +func (m *Notification) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Notification) GetContextLink() string { + if m != nil { + return m.ContextLink + } + return "" +} + +// Specifies a group of tests to gather. +type TestGroup struct { + // Name of this TestGroup, for mapping dashboard tabs to tests. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Path to the test result stored in gcs (some-bucket/some/optional/path). + GcsPrefix string `protobuf:"bytes,2,opt,name=gcs_prefix,json=gcsPrefix,proto3" json:"gcs_prefix,omitempty"` + // Number of days of test results to gather and serve. + DaysOfResults int32 `protobuf:"varint,3,opt,name=days_of_results,json=daysOfResults,proto3" json:"days_of_results,omitempty"` + // Whether to ignore pending (currently running) test results. + IgnorePending bool `protobuf:"varint,4,opt,name=ignore_pending,json=ignorePending,proto3" json:"ignore_pending,omitempty"` + // Whether to ignore reported build results. It is recommended that tests + // report BUILD_FAIL instead of relying on this being disabled. + IgnoreBuilt bool `protobuf:"varint,5,opt,name=ignore_built,json=ignoreBuilt,proto3" json:"ignore_built,omitempty"` + // What to do with the 'Tests name' configuration value. It can replace the + // name of the test, be appended to the name of the test, or ignored. If it is + // ignored, then the name of the tests will be the build target. + TestsNamePolicy TestGroup_TestsName `protobuf:"varint,6,opt,name=tests_name_policy,json=testsNamePolicy,proto3,enum=TestGroup_TestsName" json:"tests_name_policy,omitempty"` + // Tests with names that include these substrings will be removed from the + // table. 
+ IgnoreTestSubstring []string `protobuf:"bytes,8,rep,name=ignore_test_substring,json=ignoreTestSubstring,proto3" json:"ignore_test_substring,omitempty"` + ColumnHeader []*TestGroup_ColumnHeader `protobuf:"bytes,9,rep,name=column_header,json=columnHeader,proto3" json:"column_header,omitempty"` + // A test grouping option used if not specified by primary_grouping (#29) + FallbackGrouping TestGroup_FallbackGrouping `protobuf:"varint,10,opt,name=fallback_grouping,json=fallbackGrouping,proto3,enum=TestGroup_FallbackGrouping" json:"fallback_grouping,omitempty"` + // DEPRECATED: use DashboardTabAlertOptions > alert_stale_result_hours + AlertStaleResultsHours int32 `protobuf:"varint,11,opt,name=alert_stale_results_hours,json=alertStaleResultsHours,proto3" json:"alert_stale_results_hours,omitempty"` // Deprecated: Do not use. + // DEPRECATED: use DashboardTabAlertOptions > num_failures_to_alert + NumFailuresToAlert int32 `protobuf:"varint,12,opt,name=num_failures_to_alert,json=numFailuresToAlert,proto3" json:"num_failures_to_alert,omitempty"` // Deprecated: Do not use. + // Whether to automatically file bugs, and what component to file them to. + // Requires further implementation of additional components. + BugComponent int32 `protobuf:"varint,13,opt,name=bug_component,json=bugComponent,proto3" json:"bug_component,omitempty"` + // Default code search path for searching regressions. Overridden by + // code_search_path in DashboardTab. + CodeSearchPath string `protobuf:"bytes,14,opt,name=code_search_path,json=codeSearchPath,proto3" json:"code_search_path,omitempty"` + // The number of columns to consider "recent" for a variety of purposes. + NumColumnsRecent int32 `protobuf:"varint,15,opt,name=num_columns_recent,json=numColumnsRecent,proto3" json:"num_columns_recent,omitempty"` + // Whether to read test metadata from the test results. Information + // from the test metadata is used to determine where bugs are filed in + // specific cases. + UseTestMetadata bool `protobuf:"varint,16,opt,name=use_test_metadata,json=useTestMetadata,proto3" json:"use_test_metadata,omitempty"` + // DEPRECATED: use DashboardTabAlertOptions > alert_mail_to_address instead + AlertMailToAddresses string `protobuf:"bytes,17,opt,name=alert_mail_to_addresses,json=alertMailToAddresses,proto3" json:"alert_mail_to_addresses,omitempty"` // Deprecated: Do not use. + // DEPRECATED: use DashboardTabAlertOptions > subject + AlertMailSubject string `protobuf:"bytes,18,opt,name=alert_mail_subject,json=alertMailSubject,proto3" json:"alert_mail_subject,omitempty"` // Deprecated: Do not use. + // DEPRECATED: use DashboardTabAlertOptions > alert_mail_failure_message + AlertMailFailureMessage string `protobuf:"bytes,19,opt,name=alert_mail_failure_message,json=alertMailFailureMessage,proto3" json:"alert_mail_failure_message,omitempty"` // Deprecated: Do not use. + // DEPRECATED: use DashboardTabAlertOptions > debug_url + AlertMailDebugUrl string `protobuf:"bytes,20,opt,name=alert_mail_debug_url,json=alertMailDebugUrl,proto3" json:"alert_mail_debug_url,omitempty"` // Deprecated: Do not use. + // DEPRECATED: use DashboardTabAlertOptions > wait_minutes_between_emails + MinElapsedMinutesBetweenMails int32 `protobuf:"varint,21,opt,name=min_elapsed_minutes_between_mails,json=minElapsedMinutesBetweenMails,proto3" json:"min_elapsed_minutes_between_mails,omitempty"` // Deprecated: Do not use. + // Whether to treat a combination of passes and failures within one test as a + // flaky status. 
+ EnableFlakyStatus bool `protobuf:"varint,23,opt,name=enable_flaky_status,json=enableFlakyStatus,proto3" json:"enable_flaky_status,omitempty"`
+ // disable_merged_status restores the deprecated behavior of
+ // splitting multiple foo rows into foo [2], etc., rather than a single
+ // potentially flaky row.
+ DisableMergedStatus bool `protobuf:"varint,60,opt,name=disable_merged_status,json=disableMergedStatus,proto3" json:"disable_merged_status,omitempty"`
+ // deprecated - always set to true
+ UseKubernetesClient bool `protobuf:"varint,24,opt,name=use_kubernetes_client,json=useKubernetesClient,proto3" json:"use_kubernetes_client,omitempty"`
+ // When use_kubernetes_client is on, testgrid expects these results
+ // to come from prow, which should include a prowjob.json and podinfo.json
+ // to help debugging. If you do not expect these files to exist, you
+ // can optionally disable this analysis.
+ DisableProwjobAnalysis bool `protobuf:"varint,62,opt,name=disable_prowjob_analysis,json=disableProwjobAnalysis,proto3" json:"disable_prowjob_analysis,omitempty"`
+ // deprecated - always set to true
+ IsExternal bool `protobuf:"varint,25,opt,name=is_external,json=isExternal,proto3" json:"is_external,omitempty"`
+ // Specifies the test name for a test.
+ TestNameConfig *TestNameConfig `protobuf:"bytes,26,opt,name=test_name_config,json=testNameConfig,proto3" json:"test_name_config,omitempty"`
+ // A list of notifications attached to this test group.
+ // This is displayed on any dashboard tab backed by this test group.
+ Notifications []*Notification `protobuf:"bytes,27,rep,name=notifications,proto3" json:"notifications,omitempty"`
+ // A primary grouping strategy for grouping test results in columns.
+ // If a primary grouping is specified, the fallback grouping is ignored.
+ PrimaryGrouping TestGroup_PrimaryGrouping `protobuf:"varint,29,opt,name=primary_grouping,json=primaryGrouping,proto3,enum=TestGroup_PrimaryGrouping" json:"primary_grouping,omitempty"`
+ // Whether to collect pass-fail data for test methods. Additional test cases
+ // will be added for each test method in a target.
+ EnableTestMethods bool `protobuf:"varint,30,opt,name=enable_test_methods,json=enableTestMethods,proto3" json:"enable_test_methods,omitempty"`
+ // Test annotations to look for. Adds custom short text overlays to results.
+ TestAnnotations []*TestGroup_TestAnnotation `protobuf:"bytes,31,rep,name=test_annotations,json=testAnnotations,proto3" json:"test_annotations,omitempty"`
+ // Maximum number of individual test methods to collect for any given test row.
+ // If a test has more than this many methods, no methods will be displayed.
+ MaxTestMethodsPerTest int32 `protobuf:"varint,32,opt,name=max_test_methods_per_test,json=maxTestMethodsPerTest,proto3" json:"max_test_methods_per_test,omitempty"`
+ // Default metadata that should be applied for opening bugs, if a given regex
+ // matches against a test's name.
+ // Requires 'use_test_metadata = true'.
+ TestMetadataOptions []*TestMetadataOptions `protobuf:"bytes,34,rep,name=test_metadata_options,json=testMetadataOptions,proto3" json:"test_metadata_options,omitempty"`
+ // A space-delimited string of tags that are used to filter test targets.
+ // A leading - before the tag means this tag should not be present
+ // in the target.
+ // Example:
+ // contains tag1, but not tag2: test_tag_pattern = 'tag1 -tag2'
+ TestTagPattern string `protobuf:"bytes,35,opt,name=test_tag_pattern,json=testTagPattern,proto3" json:"test_tag_pattern,omitempty"`
+ // Options for auto-filed bugs, if enabled.
+ AutoBugOptions *AutoBugOptions `protobuf:"bytes,36,opt,name=auto_bug_options,json=autoBugOptions,proto3" json:"auto_bug_options,omitempty"`
+ // Max number of hours any single test can take.
+ MaxTestRuntimeHours int32 `protobuf:"varint,37,opt,name=max_test_runtime_hours,json=maxTestRuntimeHours,proto3" json:"max_test_runtime_hours,omitempty"`
+ // The number of consecutive test passes to close the alert.
+ NumPassesToDisableAlert int32 `protobuf:"varint,38,opt,name=num_passes_to_disable_alert,json=numPassesToDisableAlert,proto3" json:"num_passes_to_disable_alert,omitempty"`
+ // If true, also associate bugs with tests if the test result's overview/group
+ // ID is in the bug.
+ LinkBugsByGroup bool `protobuf:"varint,39,opt,name=link_bugs_by_group,json=linkBugsByGroup,proto3" json:"link_bugs_by_group,omitempty"`
+ // Only show test methods with all required properties.
+ TestMethodProperties []*TestGroup_KeyValue `protobuf:"bytes,41,rep,name=test_method_properties,json=testMethodProperties,proto3" json:"test_method_properties,omitempty"`
+ // If true, allows gathering and associating bugs with targets in the dashboard.
+ // Required in order to auto-file bugs.
+ GatherBugs bool `protobuf:"varint,42,opt,name=gather_bugs,json=gatherBugs,proto3" json:"gather_bugs,omitempty"`
+ // Numeric property metric value to be used for short text. If this property
+ // is present, it will override all the other short text values.
+ ShortTextMetric string `protobuf:"bytes,43,opt,name=short_text_metric,json=shortTextMetric,proto3" json:"short_text_metric,omitempty"`
+ // If true, only associate bugs with test methods if that test method is
+ // mentioned in the bug. If false, bugs will be associated with all test
+ // methods.
+ LinkBugsByTestMethods bool `protobuf:"varint,45,opt,name=link_bugs_by_test_methods,json=linkBugsByTestMethods,proto3" json:"link_bugs_by_test_methods,omitempty"`
+ // Regex to match test methods. Only test methods with names that match
+ // this regex will be included in the table.
+ TestMethodMatchRegex string `protobuf:"bytes,46,opt,name=test_method_match_regex,json=testMethodMatchRegex,proto3" json:"test_method_match_regex,omitempty"`
+ // Regex to exclude test methods. Test methods with names that match
+ // this regex will be excluded from the table, even if they match
+ // test_method_match_regex.
+ TestMethodUnmatchRegex string `protobuf:"bytes,61,opt,name=test_method_unmatch_regex,json=testMethodUnmatchRegex,proto3" json:"test_method_unmatch_regex,omitempty"`
+ // If true, test method names are printed with the full class names.
+ UseFullMethodNames bool `protobuf:"varint,47,opt,name=use_full_method_names,json=useFullMethodNames,proto3" json:"use_full_method_names,omitempty"`
+ // A configuration value that is used as a fallback grouping.
+ // This is useful for cases where there are builds that share the same
+ // commit but are run at separate times of day.
+ FallbackGroupingConfigurationValue string `protobuf:"bytes,49,opt,name=fallback_grouping_configuration_value,json=fallbackGroupingConfigurationValue,proto3" json:"fallback_grouping_configuration_value,omitempty"`
+ // Configuration type of the result source.
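+ // A minimal sketch (hypothetical YAML, keyed by the fields of this
+ // message):
+ //
+ //   result_source:
+ //     junit_config: {}
+ //
+ // selects JUnit-style results; JUnitConfig currently carries no options
+ // of its own.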
+ ResultSource *TestGroup_ResultSource `protobuf:"bytes,50,opt,name=result_source,json=resultSource,proto3" json:"result_source,omitempty"`
+ // Set of rules that are evaluated with each test result. If an evaluation is
+ // successful, the status of that test result will be whatever is specified
+ // for a given rule. For more information, look at the RuleSet documentation.
+ CustomEvaluatorRuleSet *custom_evaluator.RuleSet `protobuf:"bytes,51,opt,name=custom_evaluator_rule_set,json=customEvaluatorRuleSet,proto3" json:"custom_evaluator_rule_set,omitempty"`
+ // If true, instead of updating the group, read the state proto from storage
+ // and update summary, alerts, etc. from that state.
+ // This only applies to test group state, not bug state for a test group.
+ // This assumes that the state proto is updated through other means (another
+ // updater, manually, etc.).
+ ReadStateFromStorage bool `protobuf:"varint,52,opt,name=read_state_from_storage,json=readStateFromStorage,proto3" json:"read_state_from_storage,omitempty"`
+ // If true, only add the most recent result for a test when multiple results
+ // for a test with the same name are encountered.
+ IgnoreOldResults bool `protobuf:"varint,53,opt,name=ignore_old_results,json=ignoreOldResults,proto3" json:"ignore_old_results,omitempty"`
+ // If True, ignore the 'pass with skips' status (show as a blank cell).
+ IgnoreSkip bool `protobuf:"varint,54,opt,name=ignore_skip,json=ignoreSkip,proto3" json:"ignore_skip,omitempty"`
+ // A string containing Python strftime formatting specifiers that overrides the
+ // commit with the date formatted according to this string. This is useful
+ // for aggregating multiple columns that don't have a matching commit.
+ BuildOverrideStrftime string `protobuf:"bytes,55,opt,name=build_override_strftime,json=buildOverrideStrftime,proto3" json:"build_override_strftime,omitempty"`
+ // Specify a property that will be read into state in the user_property field.
+ // These can be substituted into LinkTemplates.
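+ // A hedged sketch: with user_property: "run_id", a LinkTemplate url may
+ // reference the stored value through template expansion; the exact
+ // placeholder syntax is defined by the LinkTemplate handling and is not
+ // reproduced here.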
+ UserProperty string `protobuf:"bytes,56,opt,name=user_property,json=userProperty,proto3" json:"user_property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestGroup) Reset() { *m = TestGroup{} } +func (m *TestGroup) String() string { return proto.CompactTextString(m) } +func (*TestGroup) ProtoMessage() {} +func (*TestGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2} +} + +func (m *TestGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestGroup.Unmarshal(m, b) +} +func (m *TestGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestGroup.Marshal(b, m, deterministic) +} +func (m *TestGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestGroup.Merge(m, src) +} +func (m *TestGroup) XXX_Size() int { + return xxx_messageInfo_TestGroup.Size(m) +} +func (m *TestGroup) XXX_DiscardUnknown() { + xxx_messageInfo_TestGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_TestGroup proto.InternalMessageInfo + +func (m *TestGroup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TestGroup) GetGcsPrefix() string { + if m != nil { + return m.GcsPrefix + } + return "" +} + +func (m *TestGroup) GetDaysOfResults() int32 { + if m != nil { + return m.DaysOfResults + } + return 0 +} + +func (m *TestGroup) GetIgnorePending() bool { + if m != nil { + return m.IgnorePending + } + return false +} + +func (m *TestGroup) GetIgnoreBuilt() bool { + if m != nil { + return m.IgnoreBuilt + } + return false +} + +func (m *TestGroup) GetTestsNamePolicy() TestGroup_TestsName { + if m != nil { + return m.TestsNamePolicy + } + return TestGroup_TESTS_NAME_UNSPECIFIED +} + +func (m *TestGroup) GetIgnoreTestSubstring() []string { + if m != nil { + return m.IgnoreTestSubstring + } + return nil +} + +func (m *TestGroup) GetColumnHeader() []*TestGroup_ColumnHeader { + if m != nil { + return m.ColumnHeader + } + return nil +} + +func (m *TestGroup) GetFallbackGrouping() TestGroup_FallbackGrouping { + if m != nil { + return m.FallbackGrouping + } + return TestGroup_FALLBACK_GROUPING_NONE +} + +// Deprecated: Do not use. +func (m *TestGroup) GetAlertStaleResultsHours() int32 { + if m != nil { + return m.AlertStaleResultsHours + } + return 0 +} + +// Deprecated: Do not use. +func (m *TestGroup) GetNumFailuresToAlert() int32 { + if m != nil { + return m.NumFailuresToAlert + } + return 0 +} + +func (m *TestGroup) GetBugComponent() int32 { + if m != nil { + return m.BugComponent + } + return 0 +} + +func (m *TestGroup) GetCodeSearchPath() string { + if m != nil { + return m.CodeSearchPath + } + return "" +} + +func (m *TestGroup) GetNumColumnsRecent() int32 { + if m != nil { + return m.NumColumnsRecent + } + return 0 +} + +func (m *TestGroup) GetUseTestMetadata() bool { + if m != nil { + return m.UseTestMetadata + } + return false +} + +// Deprecated: Do not use. +func (m *TestGroup) GetAlertMailToAddresses() string { + if m != nil { + return m.AlertMailToAddresses + } + return "" +} + +// Deprecated: Do not use. +func (m *TestGroup) GetAlertMailSubject() string { + if m != nil { + return m.AlertMailSubject + } + return "" +} + +// Deprecated: Do not use. +func (m *TestGroup) GetAlertMailFailureMessage() string { + if m != nil { + return m.AlertMailFailureMessage + } + return "" +} + +// Deprecated: Do not use. 
+func (m *TestGroup) GetAlertMailDebugUrl() string { + if m != nil { + return m.AlertMailDebugUrl + } + return "" +} + +// Deprecated: Do not use. +func (m *TestGroup) GetMinElapsedMinutesBetweenMails() int32 { + if m != nil { + return m.MinElapsedMinutesBetweenMails + } + return 0 +} + +func (m *TestGroup) GetEnableFlakyStatus() bool { + if m != nil { + return m.EnableFlakyStatus + } + return false +} + +func (m *TestGroup) GetDisableMergedStatus() bool { + if m != nil { + return m.DisableMergedStatus + } + return false +} + +func (m *TestGroup) GetUseKubernetesClient() bool { + if m != nil { + return m.UseKubernetesClient + } + return false +} + +func (m *TestGroup) GetDisableProwjobAnalysis() bool { + if m != nil { + return m.DisableProwjobAnalysis + } + return false +} + +func (m *TestGroup) GetIsExternal() bool { + if m != nil { + return m.IsExternal + } + return false +} + +func (m *TestGroup) GetTestNameConfig() *TestNameConfig { + if m != nil { + return m.TestNameConfig + } + return nil +} + +func (m *TestGroup) GetNotifications() []*Notification { + if m != nil { + return m.Notifications + } + return nil +} + +func (m *TestGroup) GetPrimaryGrouping() TestGroup_PrimaryGrouping { + if m != nil { + return m.PrimaryGrouping + } + return TestGroup_PRIMARY_GROUPING_NONE +} + +func (m *TestGroup) GetEnableTestMethods() bool { + if m != nil { + return m.EnableTestMethods + } + return false +} + +func (m *TestGroup) GetTestAnnotations() []*TestGroup_TestAnnotation { + if m != nil { + return m.TestAnnotations + } + return nil +} + +func (m *TestGroup) GetMaxTestMethodsPerTest() int32 { + if m != nil { + return m.MaxTestMethodsPerTest + } + return 0 +} + +func (m *TestGroup) GetTestMetadataOptions() []*TestMetadataOptions { + if m != nil { + return m.TestMetadataOptions + } + return nil +} + +func (m *TestGroup) GetTestTagPattern() string { + if m != nil { + return m.TestTagPattern + } + return "" +} + +func (m *TestGroup) GetAutoBugOptions() *AutoBugOptions { + if m != nil { + return m.AutoBugOptions + } + return nil +} + +func (m *TestGroup) GetMaxTestRuntimeHours() int32 { + if m != nil { + return m.MaxTestRuntimeHours + } + return 0 +} + +func (m *TestGroup) GetNumPassesToDisableAlert() int32 { + if m != nil { + return m.NumPassesToDisableAlert + } + return 0 +} + +func (m *TestGroup) GetLinkBugsByGroup() bool { + if m != nil { + return m.LinkBugsByGroup + } + return false +} + +func (m *TestGroup) GetTestMethodProperties() []*TestGroup_KeyValue { + if m != nil { + return m.TestMethodProperties + } + return nil +} + +func (m *TestGroup) GetGatherBugs() bool { + if m != nil { + return m.GatherBugs + } + return false +} + +func (m *TestGroup) GetShortTextMetric() string { + if m != nil { + return m.ShortTextMetric + } + return "" +} + +func (m *TestGroup) GetLinkBugsByTestMethods() bool { + if m != nil { + return m.LinkBugsByTestMethods + } + return false +} + +func (m *TestGroup) GetTestMethodMatchRegex() string { + if m != nil { + return m.TestMethodMatchRegex + } + return "" +} + +func (m *TestGroup) GetTestMethodUnmatchRegex() string { + if m != nil { + return m.TestMethodUnmatchRegex + } + return "" +} + +func (m *TestGroup) GetUseFullMethodNames() bool { + if m != nil { + return m.UseFullMethodNames + } + return false +} + +func (m *TestGroup) GetFallbackGroupingConfigurationValue() string { + if m != nil { + return m.FallbackGroupingConfigurationValue + } + return "" +} + +func (m *TestGroup) GetResultSource() *TestGroup_ResultSource { + if m != nil { + return m.ResultSource + } + 
return nil +} + +func (m *TestGroup) GetCustomEvaluatorRuleSet() *custom_evaluator.RuleSet { + if m != nil { + return m.CustomEvaluatorRuleSet + } + return nil +} + +func (m *TestGroup) GetReadStateFromStorage() bool { + if m != nil { + return m.ReadStateFromStorage + } + return false +} + +func (m *TestGroup) GetIgnoreOldResults() bool { + if m != nil { + return m.IgnoreOldResults + } + return false +} + +func (m *TestGroup) GetIgnoreSkip() bool { + if m != nil { + return m.IgnoreSkip + } + return false +} + +func (m *TestGroup) GetBuildOverrideStrftime() string { + if m != nil { + return m.BuildOverrideStrftime + } + return "" +} + +func (m *TestGroup) GetUserProperty() string { + if m != nil { + return m.UserProperty + } + return "" +} + +// Custom column headers for defining extra column-heading rows from values in +// the test result. +type TestGroup_ColumnHeader struct { + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + Property string `protobuf:"bytes,2,opt,name=property,proto3" json:"property,omitempty"` + ConfigurationValue string `protobuf:"bytes,3,opt,name=configuration_value,json=configurationValue,proto3" json:"configuration_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestGroup_ColumnHeader) Reset() { *m = TestGroup_ColumnHeader{} } +func (m *TestGroup_ColumnHeader) String() string { return proto.CompactTextString(m) } +func (*TestGroup_ColumnHeader) ProtoMessage() {} +func (*TestGroup_ColumnHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 0} +} + +func (m *TestGroup_ColumnHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestGroup_ColumnHeader.Unmarshal(m, b) +} +func (m *TestGroup_ColumnHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestGroup_ColumnHeader.Marshal(b, m, deterministic) +} +func (m *TestGroup_ColumnHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestGroup_ColumnHeader.Merge(m, src) +} +func (m *TestGroup_ColumnHeader) XXX_Size() int { + return xxx_messageInfo_TestGroup_ColumnHeader.Size(m) +} +func (m *TestGroup_ColumnHeader) XXX_DiscardUnknown() { + xxx_messageInfo_TestGroup_ColumnHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_TestGroup_ColumnHeader proto.InternalMessageInfo + +func (m *TestGroup_ColumnHeader) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *TestGroup_ColumnHeader) GetProperty() string { + if m != nil { + return m.Property + } + return "" +} + +func (m *TestGroup_ColumnHeader) GetConfigurationValue() string { + if m != nil { + return m.ConfigurationValue + } + return "" +} + +// Associates the presence of a named test property with a custom short text +// displayed over the results. Short text must be <=5 characters long. 
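+// For example (hypothetical values): an annotation with short_text "OOM"
+// and property_name "oom_detected" would overlay "OOM" on any result
+// reporting that property.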
+type TestGroup_TestAnnotation struct { + ShortText string `protobuf:"bytes,1,opt,name=short_text,json=shortText,proto3" json:"short_text,omitempty"` + // Types that are valid to be assigned to ShortTextMessageSource: + // *TestGroup_TestAnnotation_PropertyName + ShortTextMessageSource isTestGroup_TestAnnotation_ShortTextMessageSource `protobuf_oneof:"short_text_message_source"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestGroup_TestAnnotation) Reset() { *m = TestGroup_TestAnnotation{} } +func (m *TestGroup_TestAnnotation) String() string { return proto.CompactTextString(m) } +func (*TestGroup_TestAnnotation) ProtoMessage() {} +func (*TestGroup_TestAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 1} +} + +func (m *TestGroup_TestAnnotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestGroup_TestAnnotation.Unmarshal(m, b) +} +func (m *TestGroup_TestAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestGroup_TestAnnotation.Marshal(b, m, deterministic) +} +func (m *TestGroup_TestAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestGroup_TestAnnotation.Merge(m, src) +} +func (m *TestGroup_TestAnnotation) XXX_Size() int { + return xxx_messageInfo_TestGroup_TestAnnotation.Size(m) +} +func (m *TestGroup_TestAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_TestGroup_TestAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_TestGroup_TestAnnotation proto.InternalMessageInfo + +func (m *TestGroup_TestAnnotation) GetShortText() string { + if m != nil { + return m.ShortText + } + return "" +} + +type isTestGroup_TestAnnotation_ShortTextMessageSource interface { + isTestGroup_TestAnnotation_ShortTextMessageSource() +} + +type TestGroup_TestAnnotation_PropertyName struct { + PropertyName string `protobuf:"bytes,2,opt,name=property_name,json=propertyName,proto3,oneof"` +} + +func (*TestGroup_TestAnnotation_PropertyName) isTestGroup_TestAnnotation_ShortTextMessageSource() {} + +func (m *TestGroup_TestAnnotation) GetShortTextMessageSource() isTestGroup_TestAnnotation_ShortTextMessageSource { + if m != nil { + return m.ShortTextMessageSource + } + return nil +} + +func (m *TestGroup_TestAnnotation) GetPropertyName() string { + if x, ok := m.GetShortTextMessageSource().(*TestGroup_TestAnnotation_PropertyName); ok { + return x.PropertyName + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*TestGroup_TestAnnotation) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TestGroup_TestAnnotation_PropertyName)(nil), + } +} + +// A string key value pair message +type TestGroup_KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestGroup_KeyValue) Reset() { *m = TestGroup_KeyValue{} } +func (m *TestGroup_KeyValue) String() string { return proto.CompactTextString(m) } +func (*TestGroup_KeyValue) ProtoMessage() {} +func (*TestGroup_KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 2} +} + +func (m *TestGroup_KeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestGroup_KeyValue.Unmarshal(m, b) +} +func (m *TestGroup_KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestGroup_KeyValue.Marshal(b, m, deterministic) +} +func (m *TestGroup_KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestGroup_KeyValue.Merge(m, src) +} +func (m *TestGroup_KeyValue) XXX_Size() int { + return xxx_messageInfo_TestGroup_KeyValue.Size(m) +} +func (m *TestGroup_KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_TestGroup_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_TestGroup_KeyValue proto.InternalMessageInfo + +func (m *TestGroup_KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *TestGroup_KeyValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type TestGroup_ResultSource struct { + // Types that are valid to be assigned to ResultSourceConfig: + // *TestGroup_ResultSource_JunitConfig + ResultSourceConfig isTestGroup_ResultSource_ResultSourceConfig `protobuf_oneof:"result_source_config"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestGroup_ResultSource) Reset() { *m = TestGroup_ResultSource{} } +func (m *TestGroup_ResultSource) String() string { return proto.CompactTextString(m) } +func (*TestGroup_ResultSource) ProtoMessage() {} +func (*TestGroup_ResultSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{2, 3} +} + +func (m *TestGroup_ResultSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestGroup_ResultSource.Unmarshal(m, b) +} +func (m *TestGroup_ResultSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestGroup_ResultSource.Marshal(b, m, deterministic) +} +func (m *TestGroup_ResultSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestGroup_ResultSource.Merge(m, src) +} +func (m *TestGroup_ResultSource) XXX_Size() int { + return xxx_messageInfo_TestGroup_ResultSource.Size(m) +} +func (m *TestGroup_ResultSource) XXX_DiscardUnknown() { + xxx_messageInfo_TestGroup_ResultSource.DiscardUnknown(m) +} + +var xxx_messageInfo_TestGroup_ResultSource proto.InternalMessageInfo + +type isTestGroup_ResultSource_ResultSourceConfig interface { + isTestGroup_ResultSource_ResultSourceConfig() +} + +type TestGroup_ResultSource_JunitConfig struct { + JunitConfig *JUnitConfig `protobuf:"bytes,2,opt,name=junit_config,json=junitConfig,proto3,oneof"` +} + +func (*TestGroup_ResultSource_JunitConfig) isTestGroup_ResultSource_ResultSourceConfig() {} + +func (m *TestGroup_ResultSource) 
GetResultSourceConfig() isTestGroup_ResultSource_ResultSourceConfig {
+ if m != nil {
+ return m.ResultSourceConfig
+ }
+ return nil
+}
+
+func (m *TestGroup_ResultSource) GetJunitConfig() *JUnitConfig {
+ if x, ok := m.GetResultSourceConfig().(*TestGroup_ResultSource_JunitConfig); ok {
+ return x.JunitConfig
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*TestGroup_ResultSource) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*TestGroup_ResultSource_JunitConfig)(nil),
+ }
+}
+
+type JUnitConfig struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *JUnitConfig) Reset() { *m = JUnitConfig{} }
+func (m *JUnitConfig) String() string { return proto.CompactTextString(m) }
+func (*JUnitConfig) ProtoMessage() {}
+func (*JUnitConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3eaf2c85e69e9ea4, []int{3}
+}
+
+func (m *JUnitConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_JUnitConfig.Unmarshal(m, b)
+}
+func (m *JUnitConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_JUnitConfig.Marshal(b, m, deterministic)
+}
+func (m *JUnitConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_JUnitConfig.Merge(m, src)
+}
+func (m *JUnitConfig) XXX_Size() int {
+ return xxx_messageInfo_JUnitConfig.Size(m)
+}
+func (m *JUnitConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_JUnitConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JUnitConfig proto.InternalMessageInfo
+
+// Default metadata to apply when opening bugs.
+type TestMetadataOptions struct {
+ // Apply the following metadata if this regex matches a test's name.
+ TestNameRegex string `protobuf:"bytes,1,opt,name=test_name_regex,json=testNameRegex,proto3" json:"test_name_regex,omitempty"`
+ // Default bug component to open a bug in.
+ BugComponent int32 `protobuf:"varint,2,opt,name=bug_component,json=bugComponent,proto3" json:"bug_component,omitempty"`
+ // Default owner to assign a bug to.
+ Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+ // List of default users to CC a bug to.
+ Cc []string `protobuf:"bytes,4,rep,name=cc,proto3" json:"cc,omitempty"`
+ // Apply the following metadata if this regex matches a test's failure message.
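+ // A minimal sketch (all values hypothetical):
+ //
+ //   test_metadata_options:
+ //   - test_name_regex: "^serving/.*"
+ //     bug_component: 123456
+ //     owner: "oncall-alias"
+ //     message_regex: "OOMKilled"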
+ MessageRegex string `protobuf:"bytes,5,opt,name=message_regex,json=messageRegex,proto3" json:"message_regex,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TestMetadataOptions) Reset() { *m = TestMetadataOptions{} }
+func (m *TestMetadataOptions) String() string { return proto.CompactTextString(m) }
+func (*TestMetadataOptions) ProtoMessage() {}
+func (*TestMetadataOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3eaf2c85e69e9ea4, []int{4}
+}
+
+func (m *TestMetadataOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TestMetadataOptions.Unmarshal(m, b)
+}
+func (m *TestMetadataOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TestMetadataOptions.Marshal(b, m, deterministic)
+}
+func (m *TestMetadataOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TestMetadataOptions.Merge(m, src)
+}
+func (m *TestMetadataOptions) XXX_Size() int {
+ return xxx_messageInfo_TestMetadataOptions.Size(m)
+}
+func (m *TestMetadataOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_TestMetadataOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TestMetadataOptions proto.InternalMessageInfo
+
+func (m *TestMetadataOptions) GetTestNameRegex() string {
+ if m != nil {
+ return m.TestNameRegex
+ }
+ return ""
+}
+
+func (m *TestMetadataOptions) GetBugComponent() int32 {
+ if m != nil {
+ return m.BugComponent
+ }
+ return 0
+}
+
+func (m *TestMetadataOptions) GetOwner() string {
+ if m != nil {
+ return m.Owner
+ }
+ return ""
+}
+
+func (m *TestMetadataOptions) GetCc() []string {
+ if m != nil {
+ return m.Cc
+ }
+ return nil
+}
+
+func (m *TestMetadataOptions) GetMessageRegex() string {
+ if m != nil {
+ return m.MessageRegex
+ }
+ return ""
+}
+
+type AutoBugOptions struct {
+ // [BETA] When specified, file bugs to this component, using the beta AutoBug.
+ // If you do not want to opt into the beta, specify `bug_component` in your
+ // TestGroup instead.
+ // TODO(b/154866134): Rename to autobug_component once we've migrated.
+ BetaAutobugComponent int32 `protobuf:"varint,10,opt,name=beta_autobug_component,json=betaAutobugComponent,proto3" json:"beta_autobug_component,omitempty"`
+ // Whether to auto-close auto-filed bugs.
+ AutoClose bool `protobuf:"varint,1,opt,name=auto_close,json=autoClose,proto3" json:"auto_close,omitempty"`
+ // A list of hotlist ids attached to auto-filed bugs.
+ HotlistIds []int64 `protobuf:"varint,2,rep,packed,name=hotlist_ids,json=hotlistIds,proto3" json:"hotlist_ids,omitempty"`
+ // The priority of the auto-filed bug. If provided, this will overwrite the
+ // priority in the component default template.
+ Priority AutoBugOptions_Priority `protobuf:"varint,3,opt,name=priority,proto3,enum=AutoBugOptions_Priority" json:"priority,omitempty"`
+ // A list of hotlist id sources.
+ // Corresponds with the list hotlist_ids (#2).
+ HotlistIdsFromSource []*HotlistIdFromSource `protobuf:"bytes,4,rep,name=hotlist_ids_from_source,json=hotlistIdsFromSource,proto3" json:"hotlist_ids_from_source,omitempty"`
+ // If True, files separate bugs for each failing target, instead of one bug
+ // for each set of targets failing at the same run.
+ FileIndividual bool `protobuf:"varint,5,opt,name=file_individual,json=fileIndividual,proto3" json:"file_individual,omitempty"`
+ // If True, keep only one automatic bug per target, regardless of the number
+ // of separate failures a target gets. This also requires `auto_close` and
+ // `file_individual` to be True.
+ // Consider setting `num_passes_to_disable_alert` instead if you're tracking
+ // flaky tests.
+ SingletonAutobug bool `protobuf:"varint,6,opt,name=singleton_autobug,json=singletonAutobug,proto3" json:"singleton_autobug,omitempty"`
+ // If provided: only raise one bug if the number of failures for a single
+ // query by testgrid for a single failure group exceeds this value. Requires
+ // 'file_individual' to be True.
+ MaxAllowedIndividualBugs int32 `protobuf:"varint,7,opt,name=max_allowed_individual_bugs,json=maxAllowedIndividualBugs,proto3" json:"max_allowed_individual_bugs,omitempty"`
+ // If True, file issues for the 'Overall' target, even if otherwise invalid.
+ FileOverall bool `protobuf:"varint,8,opt,name=file_overall,json=fileOverall,proto3" json:"file_overall,omitempty"`
+ // If provided: supplements `max_allowed_individual_bugs` field to raise a
+ // single bug if the number of failures for a single query by testgrid exceeds
+ // the `max_allowed_individual_bugs` value, regardless of `TEST_METADATA`
+ // configurations. This is useful for filing fewer suspected environmental
+ // failure bugs and routing them to a specific location (i.e. an oncall).
+ // Requires 'file_individual' to be true and `max_allowed_individual_bugs` to
+ // not be empty.
+ DefaultTestMetadata *AutoBugOptions_DefaultTestMetadata `protobuf:"bytes,9,opt,name=default_test_metadata,json=defaultTestMetadata,proto3" json:"default_test_metadata,omitempty"`
+ // [BETA] If True, query the test metadata API to get issue-routing metadata.
+ // Enables routing issues using structured test failures.
+ AdvancedTestMetadata bool `protobuf:"varint,11,opt,name=advanced_test_metadata,json=advancedTestMetadata,proto3" json:"advanced_test_metadata,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AutoBugOptions) Reset() { *m = AutoBugOptions{} }
+func (m *AutoBugOptions) String() string { return proto.CompactTextString(m) }
+func (*AutoBugOptions) ProtoMessage() {}
+func (*AutoBugOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3eaf2c85e69e9ea4, []int{5}
+}
+
+func (m *AutoBugOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AutoBugOptions.Unmarshal(m, b)
+}
+func (m *AutoBugOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AutoBugOptions.Marshal(b, m, deterministic)
+}
+func (m *AutoBugOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AutoBugOptions.Merge(m, src)
+}
+func (m *AutoBugOptions) XXX_Size() int {
+ return xxx_messageInfo_AutoBugOptions.Size(m)
+}
+func (m *AutoBugOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_AutoBugOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AutoBugOptions proto.InternalMessageInfo
+
+func (m *AutoBugOptions) GetBetaAutobugComponent() int32 {
+ if m != nil {
+ return m.BetaAutobugComponent
+ }
+ return 0
+}
+
+func (m *AutoBugOptions) GetAutoClose() bool {
+ if m != nil {
+ return m.AutoClose
+ }
+ return false
+}
+
+func (m *AutoBugOptions) GetHotlistIds() []int64 {
+ if m != nil {
+ return m.HotlistIds
+ }
+ return nil
+}
+
+func (m *AutoBugOptions) GetPriority() AutoBugOptions_Priority {
+ if m != nil {
+ return m.Priority
+ }
+ return AutoBugOptions_PRIORITY_UNSPECIFIED
+}
+
+func (m *AutoBugOptions) GetHotlistIdsFromSource() []*HotlistIdFromSource {
+ if m != nil {
+ return m.HotlistIdsFromSource
+ }
+ return nil
+}
+
+func (m
*AutoBugOptions) GetFileIndividual() bool { + if m != nil { + return m.FileIndividual + } + return false +} + +func (m *AutoBugOptions) GetSingletonAutobug() bool { + if m != nil { + return m.SingletonAutobug + } + return false +} + +func (m *AutoBugOptions) GetMaxAllowedIndividualBugs() int32 { + if m != nil { + return m.MaxAllowedIndividualBugs + } + return 0 +} + +func (m *AutoBugOptions) GetFileOverall() bool { + if m != nil { + return m.FileOverall + } + return false +} + +func (m *AutoBugOptions) GetDefaultTestMetadata() *AutoBugOptions_DefaultTestMetadata { + if m != nil { + return m.DefaultTestMetadata + } + return nil +} + +func (m *AutoBugOptions) GetAdvancedTestMetadata() bool { + if m != nil { + return m.AdvancedTestMetadata + } + return false +} + +type AutoBugOptions_DefaultTestMetadata struct { + BugComponent int32 `protobuf:"varint,1,opt,name=bug_component,json=bugComponent,proto3" json:"bug_component,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + Cc string `protobuf:"bytes,3,opt,name=cc,proto3" json:"cc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutoBugOptions_DefaultTestMetadata) Reset() { *m = AutoBugOptions_DefaultTestMetadata{} } +func (m *AutoBugOptions_DefaultTestMetadata) String() string { return proto.CompactTextString(m) } +func (*AutoBugOptions_DefaultTestMetadata) ProtoMessage() {} +func (*AutoBugOptions_DefaultTestMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{5, 0} +} + +func (m *AutoBugOptions_DefaultTestMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutoBugOptions_DefaultTestMetadata.Unmarshal(m, b) +} +func (m *AutoBugOptions_DefaultTestMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutoBugOptions_DefaultTestMetadata.Marshal(b, m, deterministic) +} +func (m *AutoBugOptions_DefaultTestMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutoBugOptions_DefaultTestMetadata.Merge(m, src) +} +func (m *AutoBugOptions_DefaultTestMetadata) XXX_Size() int { + return xxx_messageInfo_AutoBugOptions_DefaultTestMetadata.Size(m) +} +func (m *AutoBugOptions_DefaultTestMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AutoBugOptions_DefaultTestMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_AutoBugOptions_DefaultTestMetadata proto.InternalMessageInfo + +func (m *AutoBugOptions_DefaultTestMetadata) GetBugComponent() int32 { + if m != nil { + return m.BugComponent + } + return 0 +} + +func (m *AutoBugOptions_DefaultTestMetadata) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *AutoBugOptions_DefaultTestMetadata) GetCc() string { + if m != nil { + return m.Cc + } + return "" +} + +type HotlistIdFromSource struct { + // Types that are valid to be assigned to HotlistIdSource: + // *HotlistIdFromSource_Value + // *HotlistIdFromSource_Label + HotlistIdSource isHotlistIdFromSource_HotlistIdSource `protobuf_oneof:"hotlist_id_source"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HotlistIdFromSource) Reset() { *m = HotlistIdFromSource{} } +func (m *HotlistIdFromSource) String() string { return proto.CompactTextString(m) } +func (*HotlistIdFromSource) ProtoMessage() {} +func (*HotlistIdFromSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{6} +} + +func (m 
*HotlistIdFromSource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_HotlistIdFromSource.Unmarshal(m, b)
+}
+func (m *HotlistIdFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_HotlistIdFromSource.Marshal(b, m, deterministic)
+}
+func (m *HotlistIdFromSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HotlistIdFromSource.Merge(m, src)
+}
+func (m *HotlistIdFromSource) XXX_Size() int {
+ return xxx_messageInfo_HotlistIdFromSource.Size(m)
+}
+func (m *HotlistIdFromSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_HotlistIdFromSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HotlistIdFromSource proto.InternalMessageInfo
+
+type isHotlistIdFromSource_HotlistIdSource interface {
+ isHotlistIdFromSource_HotlistIdSource()
+}
+
+type HotlistIdFromSource_Value struct {
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3,oneof"`
+}
+
+type HotlistIdFromSource_Label struct {
+ Label string `protobuf:"bytes,2,opt,name=label,proto3,oneof"`
+}
+
+func (*HotlistIdFromSource_Value) isHotlistIdFromSource_HotlistIdSource() {}
+
+func (*HotlistIdFromSource_Label) isHotlistIdFromSource_HotlistIdSource() {}
+
+func (m *HotlistIdFromSource) GetHotlistIdSource() isHotlistIdFromSource_HotlistIdSource {
+ if m != nil {
+ return m.HotlistIdSource
+ }
+ return nil
+}
+
+func (m *HotlistIdFromSource) GetValue() int64 {
+ if x, ok := m.GetHotlistIdSource().(*HotlistIdFromSource_Value); ok {
+ return x.Value
+ }
+ return 0
+}
+
+func (m *HotlistIdFromSource) GetLabel() string {
+ if x, ok := m.GetHotlistIdSource().(*HotlistIdFromSource_Label); ok {
+ return x.Label
+ }
+ return ""
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*HotlistIdFromSource) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*HotlistIdFromSource_Value)(nil),
+ (*HotlistIdFromSource_Label)(nil),
+ }
+}
+
+// Specifies a dashboard.
+type Dashboard struct {
+ // A list of the tabs on the dashboard.
+ DashboardTab []*DashboardTab `protobuf:"bytes,1,rep,name=dashboard_tab,json=dashboardTab,proto3" json:"dashboard_tab,omitempty"`
+ // A name for the Dashboard.
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // A list of notifications attached to this dashboard.
+ // This is displayed on any dashboard tab in this dashboard.
+ Notifications []*Notification `protobuf:"bytes,3,rep,name=notifications,proto3" json:"notifications,omitempty"`
+ // Controls which tab is displayed when first opening a dashboard.
+ // Defaults to Summary.
+ DefaultTab string `protobuf:"bytes,5,opt,name=default_tab,json=defaultTab,proto3" json:"default_tab,omitempty"`
+ // Controls whether to suppress highlighting of failing tabs.
+ DownplayFailingTabs bool `protobuf:"varint,8,opt,name=downplay_failing_tabs,json=downplayFailingTabs,proto3" json:"downplay_failing_tabs,omitempty"`
+ // Deprecated: Inverse of 'downplay_failing_tabs'.
+ HighlightFailingTabs bool `protobuf:"varint,6,opt,name=highlight_failing_tabs,json=highlightFailingTabs,proto3" json:"highlight_failing_tabs,omitempty"` // Deprecated: Do not use.
+ // Controls whether to apply special highlighting to result header columns for
+ // the current day.
+ HighlightToday bool `protobuf:"varint,7,opt,name=highlight_today,json=highlightToday,proto3" json:"highlight_today,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Dashboard) Reset() { *m = Dashboard{} } +func (m *Dashboard) String() string { return proto.CompactTextString(m) } +func (*Dashboard) ProtoMessage() {} +func (*Dashboard) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{7} +} + +func (m *Dashboard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Dashboard.Unmarshal(m, b) +} +func (m *Dashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Dashboard.Marshal(b, m, deterministic) +} +func (m *Dashboard) XXX_Merge(src proto.Message) { + xxx_messageInfo_Dashboard.Merge(m, src) +} +func (m *Dashboard) XXX_Size() int { + return xxx_messageInfo_Dashboard.Size(m) +} +func (m *Dashboard) XXX_DiscardUnknown() { + xxx_messageInfo_Dashboard.DiscardUnknown(m) +} + +var xxx_messageInfo_Dashboard proto.InternalMessageInfo + +func (m *Dashboard) GetDashboardTab() []*DashboardTab { + if m != nil { + return m.DashboardTab + } + return nil +} + +func (m *Dashboard) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Dashboard) GetNotifications() []*Notification { + if m != nil { + return m.Notifications + } + return nil +} + +func (m *Dashboard) GetDefaultTab() string { + if m != nil { + return m.DefaultTab + } + return "" +} + +func (m *Dashboard) GetDownplayFailingTabs() bool { + if m != nil { + return m.DownplayFailingTabs + } + return false +} + +// Deprecated: Do not use. +func (m *Dashboard) GetHighlightFailingTabs() bool { + if m != nil { + return m.HighlightFailingTabs + } + return false +} + +func (m *Dashboard) GetHighlightToday() bool { + if m != nil { + return m.HighlightToday + } + return false +} + +type LinkTemplate struct { + // The URL template. + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + // The options templates. 
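+ // For instance (hypothetical URL; <gcs_prefix> and <changelist> are
+ // placeholder tokens expanded by TestGrid):
+ //
+ //   open_test_template:
+ //     url: "https://prow.knative.dev/view/gcs/<gcs_prefix>/<changelist>"
+ //
+ // Option values (next field) are expanded the same way.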
+ Options []*LinkOptionsTemplate `protobuf:"bytes,2,rep,name=options,proto3" json:"options,omitempty"` + // An optional name, used for the context menu + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinkTemplate) Reset() { *m = LinkTemplate{} } +func (m *LinkTemplate) String() string { return proto.CompactTextString(m) } +func (*LinkTemplate) ProtoMessage() {} +func (*LinkTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{8} +} + +func (m *LinkTemplate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LinkTemplate.Unmarshal(m, b) +} +func (m *LinkTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LinkTemplate.Marshal(b, m, deterministic) +} +func (m *LinkTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinkTemplate.Merge(m, src) +} +func (m *LinkTemplate) XXX_Size() int { + return xxx_messageInfo_LinkTemplate.Size(m) +} +func (m *LinkTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_LinkTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_LinkTemplate proto.InternalMessageInfo + +func (m *LinkTemplate) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *LinkTemplate) GetOptions() []*LinkOptionsTemplate { + if m != nil { + return m.Options + } + return nil +} + +func (m *LinkTemplate) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A simple key/value pair for link options. +type LinkOptionsTemplate struct { + // The key for the option. This is not expanded. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value for the option. This is expanded the same as the LinkTemplate. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} } +func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) } +func (*LinkOptionsTemplate) ProtoMessage() {} +func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{9} +} + +func (m *LinkOptionsTemplate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LinkOptionsTemplate.Unmarshal(m, b) +} +func (m *LinkOptionsTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LinkOptionsTemplate.Marshal(b, m, deterministic) +} +func (m *LinkOptionsTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinkOptionsTemplate.Merge(m, src) +} +func (m *LinkOptionsTemplate) XXX_Size() int { + return xxx_messageInfo_LinkOptionsTemplate.Size(m) +} +func (m *LinkOptionsTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_LinkOptionsTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_LinkOptionsTemplate proto.InternalMessageInfo + +func (m *LinkOptionsTemplate) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LinkOptionsTemplate) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A single tab on a dashboard. +type DashboardTab struct { + // The name of the dashboard tab to display in the client. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The name of the TestGroup specifying the test results for this tab. 
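+ // For example (hypothetical names):
+ //
+ //   dashboard_tab:
+ //   - name: "continuous"
+ //     test_group_name: "ci-knative-serving-continuous"
+ //
+ // renders, on this tab, the results gathered by that TestGroup.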
+ TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName,proto3" json:"test_group_name,omitempty"`
+ // Default bug component for manually filing bugs from the dashboard.
+ BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent,proto3" json:"bug_component,omitempty"`
+ // Default code search path for searching regressions. This value overrides
+ // the default in the TestGroup config so that dashboards may be customized
+ // separately.
+ CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath,proto3" json:"code_search_path,omitempty"`
+ // See TestGroup.num_columns_recent. This value overrides the default in the
+ // TestGroup config so that dashboards may be customized separately.
+ NumColumnsRecent int32 `protobuf:"varint,5,opt,name=num_columns_recent,json=numColumnsRecent,proto3" json:"num_columns_recent,omitempty"`
+ // Base options to always include, for example:
+ // width=20&include-filter-by-regex=level_tests
+ // This is taken from the #fragment part of the testgrid url.
+ // The best way to create these is to set up the options on testgrid and then
+ // copy the #fragment part.
+ BaseOptions string `protobuf:"bytes,6,opt,name=base_options,json=baseOptions,proto3" json:"base_options,omitempty"`
+ // The URL template to visit after clicking on a cell.
+ OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate,proto3" json:"open_test_template,omitempty"`
+ // The URL template to visit when filing a bug.
+ FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate,proto3" json:"file_bug_template,omitempty"`
+ // The URL template to visit when attaching a bug.
+ AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate,proto3" json:"attach_bug_template,omitempty"`
+ // Text to show in the about menu as a link to another view of the results.
+ ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText,proto3" json:"results_text,omitempty"`
+ // The URL template to visit after clicking.
+ ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate,proto3" json:"results_url_template,omitempty"`
+ // The URL template to visit when searching for code changes, such as pull
+ // requests.
+ CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate,proto3" json:"code_search_url_template,omitempty"`
+ // A description paragraph to be displayed.
+ Description string `protobuf:"bytes,13,opt,name=description,proto3" json:"description,omitempty"`
+ // A regular expression that uses the named group syntax to specify how to
+ // show names in a table.
+ TabularNamesRegex string `protobuf:"bytes,14,opt,name=tabular_names_regex,json=tabularNamesRegex,proto3" json:"tabular_names_regex,omitempty"`
+ // Configuration options for dashboard tab alerts.
+ AlertOptions *DashboardTabAlertOptions `protobuf:"bytes,15,opt,name=alert_options,json=alertOptions,proto3" json:"alert_options,omitempty"`
+ // Configuration options for dashboard tab flakiness alerts.
+ FlakinessAlertOptions *DashboardTabFlakinessAlertOptions `protobuf:"bytes,24,opt,name=flakiness_alert_options,json=flakinessAlertOptions,proto3" json:"flakiness_alert_options,omitempty"`
+ // A URL for the "About this Dashboard" menu option.
+ AboutDashboardUrl string `protobuf:"bytes,16,opt,name=about_dashboard_url,json=aboutDashboardUrl,proto3" json:"about_dashboard_url,omitempty"`
+ // The URL template to visit when viewing an associated bug.
+ OpenBugTemplate *LinkTemplate `protobuf:"bytes,17,opt,name=open_bug_template,json=openBugTemplate,proto3" json:"open_bug_template,omitempty"`
+ // If true, auto-file bugs when new alerts occur. This requires that the
+ // backing test group has `bug_component` set and uses the backing test
+ // group's `auto_bug_options`.
+ AutoFileBugs bool `protobuf:"varint,18,opt,name=auto_file_bugs,json=autoFileBugs,proto3" json:"auto_file_bugs,omitempty"`
+ // Display user local time on the dashboard when set to true (the default).
+ // If false, uses Pacific Timezone for this DashboardTab.
+ DisplayLocalTime bool `protobuf:"varint,19,opt,name=display_local_time,json=displayLocalTime,proto3" json:"display_local_time,omitempty"`
+ // A set of optional LinkTemplates that will become right-click context menu
+ // items.
+ // TODO(b/159042168) in the near future this should be re-implemented as a
+ // generic list of repeated LinkTemplates which users may specify in their
+ // respective configurations as right-click context menus with names and
+ // actions upon being clicked.
+ ContextMenuTemplate *LinkTemplate `protobuf:"bytes,20,opt,name=context_menu_template,json=contextMenuTemplate,proto3" json:"context_menu_template,omitempty"`
+ // When specified, treat a tab as BROKEN as long as one of the most recent
+ // columns is "broken" (the ratio of failed to total tests exceeds this
+ // threshold).
+ BrokenColumnThreshold float32 `protobuf:"fixed32,21,opt,name=broken_column_threshold,json=brokenColumnThreshold,proto3" json:"broken_column_threshold,omitempty"`
+ // Options for auto-filed bugs.
+ // Using this for a dashboard tab requires specifying `beta_autobug_component`
+ // and will opt you into the beta AutoBug.
+ BetaAutobugOptions *AutoBugOptions `protobuf:"bytes,22,opt,name=beta_autobug_options,json=betaAutobugOptions,proto3" json:"beta_autobug_options,omitempty"` + // Options for the configuration of the flakiness analysis tool, on a per tab basis + HealthAnalysisOptions *HealthAnalysisOptions `protobuf:"bytes,23,opt,name=health_analysis_options,json=healthAnalysisOptions,proto3" json:"health_analysis_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DashboardTab) Reset() { *m = DashboardTab{} } +func (m *DashboardTab) String() string { return proto.CompactTextString(m) } +func (*DashboardTab) ProtoMessage() {} +func (*DashboardTab) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{10} +} + +func (m *DashboardTab) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DashboardTab.Unmarshal(m, b) +} +func (m *DashboardTab) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DashboardTab.Marshal(b, m, deterministic) +} +func (m *DashboardTab) XXX_Merge(src proto.Message) { + xxx_messageInfo_DashboardTab.Merge(m, src) +} +func (m *DashboardTab) XXX_Size() int { + return xxx_messageInfo_DashboardTab.Size(m) +} +func (m *DashboardTab) XXX_DiscardUnknown() { + xxx_messageInfo_DashboardTab.DiscardUnknown(m) +} + +var xxx_messageInfo_DashboardTab proto.InternalMessageInfo + +func (m *DashboardTab) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DashboardTab) GetTestGroupName() string { + if m != nil { + return m.TestGroupName + } + return "" +} + +func (m *DashboardTab) GetBugComponent() int32 { + if m != nil { + return m.BugComponent + } + return 0 +} + +func (m *DashboardTab) GetCodeSearchPath() string { + if m != nil { + return m.CodeSearchPath + } + return "" +} + +func (m *DashboardTab) GetNumColumnsRecent() int32 { + if m != nil { + return m.NumColumnsRecent + } + return 0 +} + +func (m *DashboardTab) GetBaseOptions() string { + if m != nil { + return m.BaseOptions + } + return "" +} + +func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate { + if m != nil { + return m.OpenTestTemplate + } + return nil +} + +func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate { + if m != nil { + return m.FileBugTemplate + } + return nil +} + +func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate { + if m != nil { + return m.AttachBugTemplate + } + return nil +} + +func (m *DashboardTab) GetResultsText() string { + if m != nil { + return m.ResultsText + } + return "" +} + +func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate { + if m != nil { + return m.ResultsUrlTemplate + } + return nil +} + +func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate { + if m != nil { + return m.CodeSearchUrlTemplate + } + return nil +} + +func (m *DashboardTab) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *DashboardTab) GetTabularNamesRegex() string { + if m != nil { + return m.TabularNamesRegex + } + return "" +} + +func (m *DashboardTab) GetAlertOptions() *DashboardTabAlertOptions { + if m != nil { + return m.AlertOptions + } + return nil +} + +func (m *DashboardTab) GetFlakinessAlertOptions() *DashboardTabFlakinessAlertOptions { + if m != nil { + return m.FlakinessAlertOptions + } + return nil +} + +func (m *DashboardTab) GetAboutDashboardUrl() string { + if m != nil { + return m.AboutDashboardUrl + } + return "" +} + +func (m *DashboardTab) 
GetOpenBugTemplate() *LinkTemplate { + if m != nil { + return m.OpenBugTemplate + } + return nil +} + +func (m *DashboardTab) GetAutoFileBugs() bool { + if m != nil { + return m.AutoFileBugs + } + return false +} + +func (m *DashboardTab) GetDisplayLocalTime() bool { + if m != nil { + return m.DisplayLocalTime + } + return false +} + +func (m *DashboardTab) GetContextMenuTemplate() *LinkTemplate { + if m != nil { + return m.ContextMenuTemplate + } + return nil +} + +func (m *DashboardTab) GetBrokenColumnThreshold() float32 { + if m != nil { + return m.BrokenColumnThreshold + } + return 0 +} + +func (m *DashboardTab) GetBetaAutobugOptions() *AutoBugOptions { + if m != nil { + return m.BetaAutobugOptions + } + return nil +} + +func (m *DashboardTab) GetHealthAnalysisOptions() *HealthAnalysisOptions { + if m != nil { + return m.HealthAnalysisOptions + } + return nil +} + +// Configuration options for dashboard tab alerts. +type DashboardTabAlertOptions struct { + // Time in hours before an alert will be added to a test results table if the + // run date of the latest results are older than this time. If zero, no + // alerts are raised. + AlertStaleResultsHours int32 `protobuf:"varint,1,opt,name=alert_stale_results_hours,json=alertStaleResultsHours,proto3" json:"alert_stale_results_hours,omitempty"` + // The number of consecutive test result failures to see before alerting of + // a consistent failure. If zero, no alerts are raised. + NumFailuresToAlert int32 `protobuf:"varint,2,opt,name=num_failures_to_alert,json=numFailuresToAlert,proto3" json:"num_failures_to_alert,omitempty"` + // The comma-separated addresses to send mail. + AlertMailToAddresses string `protobuf:"bytes,3,opt,name=alert_mail_to_addresses,json=alertMailToAddresses,proto3" json:"alert_mail_to_addresses,omitempty"` + // The number of consecutive test passes to close the alert. + NumPassesToDisableAlert int32 `protobuf:"varint,4,opt,name=num_passes_to_disable_alert,json=numPassesToDisableAlert,proto3" json:"num_passes_to_disable_alert,omitempty"` + // Custom subject for alert mails. + Subject string `protobuf:"bytes,5,opt,name=subject,proto3" json:"subject,omitempty"` + // Custom link for further help/instructions on debugging this alert. + DebugUrl string `protobuf:"bytes,6,opt,name=debug_url,json=debugUrl,proto3" json:"debug_url,omitempty"` + // Custom text to show for the debug link. + DebugMessage string `protobuf:"bytes,7,opt,name=debug_message,json=debugMessage,proto3" json:"debug_message,omitempty"` + // Wait time between emails. If unset or zero, an email will be sent only once + // it becomes a consistent failure, and not again until it succeeds. 
+ // TestGrid does not pester about staleness + WaitMinutesBetweenEmails int32 `protobuf:"varint,8,opt,name=wait_minutes_between_emails,json=waitMinutesBetweenEmails,proto3" json:"wait_minutes_between_emails,omitempty"` + // A custom message + AlertMailFailureMessage string `protobuf:"bytes,9,opt,name=alert_mail_failure_message,json=alertMailFailureMessage,proto3" json:"alert_mail_failure_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DashboardTabAlertOptions) Reset() { *m = DashboardTabAlertOptions{} } +func (m *DashboardTabAlertOptions) String() string { return proto.CompactTextString(m) } +func (*DashboardTabAlertOptions) ProtoMessage() {} +func (*DashboardTabAlertOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{11} +} + +func (m *DashboardTabAlertOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DashboardTabAlertOptions.Unmarshal(m, b) +} +func (m *DashboardTabAlertOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DashboardTabAlertOptions.Marshal(b, m, deterministic) +} +func (m *DashboardTabAlertOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DashboardTabAlertOptions.Merge(m, src) +} +func (m *DashboardTabAlertOptions) XXX_Size() int { + return xxx_messageInfo_DashboardTabAlertOptions.Size(m) +} +func (m *DashboardTabAlertOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DashboardTabAlertOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DashboardTabAlertOptions proto.InternalMessageInfo + +func (m *DashboardTabAlertOptions) GetAlertStaleResultsHours() int32 { + if m != nil { + return m.AlertStaleResultsHours + } + return 0 +} + +func (m *DashboardTabAlertOptions) GetNumFailuresToAlert() int32 { + if m != nil { + return m.NumFailuresToAlert + } + return 0 +} + +func (m *DashboardTabAlertOptions) GetAlertMailToAddresses() string { + if m != nil { + return m.AlertMailToAddresses + } + return "" +} + +func (m *DashboardTabAlertOptions) GetNumPassesToDisableAlert() int32 { + if m != nil { + return m.NumPassesToDisableAlert + } + return 0 +} + +func (m *DashboardTabAlertOptions) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *DashboardTabAlertOptions) GetDebugUrl() string { + if m != nil { + return m.DebugUrl + } + return "" +} + +func (m *DashboardTabAlertOptions) GetDebugMessage() string { + if m != nil { + return m.DebugMessage + } + return "" +} + +func (m *DashboardTabAlertOptions) GetWaitMinutesBetweenEmails() int32 { + if m != nil { + return m.WaitMinutesBetweenEmails + } + return 0 +} + +func (m *DashboardTabAlertOptions) GetAlertMailFailureMessage() string { + if m != nil { + return m.AlertMailFailureMessage + } + return "" +} + +// Configuration options for dashboard tab flakiness alerts. +type DashboardTabFlakinessAlertOptions struct { + // The minimum amount of flakiness needed to trigger a flakiness alert. + // 0=Disable alerts + // This is a percentage; expected values go from 0 to 100 (100 = 100% flaky) + MinimumFlakinessToAlert float32 `protobuf:"fixed32,1,opt,name=minimum_flakiness_to_alert,json=minimumFlakinessToAlert,proto3" json:"minimum_flakiness_to_alert,omitempty"` + // The comma-separated addresses to send mail. + AlertMailToAddresses string `protobuf:"bytes,2,opt,name=alert_mail_to_addresses,json=alertMailToAddresses,proto3" json:"alert_mail_to_addresses,omitempty"` + // Custom subject for alert mails. 
+ Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Minimum time between sending mails. + WaitMinutesBetweenEmails int32 `protobuf:"varint,4,opt,name=wait_minutes_between_emails,json=waitMinutesBetweenEmails,proto3" json:"wait_minutes_between_emails,omitempty"` + // A custom message + // TODO(RonWeber): This should be a template + AlertMailFailureMessage string `protobuf:"bytes,5,opt,name=alert_mail_failure_message,json=alertMailFailureMessage,proto3" json:"alert_mail_failure_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DashboardTabFlakinessAlertOptions) Reset() { *m = DashboardTabFlakinessAlertOptions{} } +func (m *DashboardTabFlakinessAlertOptions) String() string { return proto.CompactTextString(m) } +func (*DashboardTabFlakinessAlertOptions) ProtoMessage() {} +func (*DashboardTabFlakinessAlertOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{12} +} + +func (m *DashboardTabFlakinessAlertOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DashboardTabFlakinessAlertOptions.Unmarshal(m, b) +} +func (m *DashboardTabFlakinessAlertOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DashboardTabFlakinessAlertOptions.Marshal(b, m, deterministic) +} +func (m *DashboardTabFlakinessAlertOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DashboardTabFlakinessAlertOptions.Merge(m, src) +} +func (m *DashboardTabFlakinessAlertOptions) XXX_Size() int { + return xxx_messageInfo_DashboardTabFlakinessAlertOptions.Size(m) +} +func (m *DashboardTabFlakinessAlertOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DashboardTabFlakinessAlertOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DashboardTabFlakinessAlertOptions proto.InternalMessageInfo + +func (m *DashboardTabFlakinessAlertOptions) GetMinimumFlakinessToAlert() float32 { + if m != nil { + return m.MinimumFlakinessToAlert + } + return 0 +} + +func (m *DashboardTabFlakinessAlertOptions) GetAlertMailToAddresses() string { + if m != nil { + return m.AlertMailToAddresses + } + return "" +} + +func (m *DashboardTabFlakinessAlertOptions) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *DashboardTabFlakinessAlertOptions) GetWaitMinutesBetweenEmails() int32 { + if m != nil { + return m.WaitMinutesBetweenEmails + } + return 0 +} + +func (m *DashboardTabFlakinessAlertOptions) GetAlertMailFailureMessage() string { + if m != nil { + return m.AlertMailFailureMessage + } + return "" +} + +// Specifies a dashboard group. +type DashboardGroup struct { + // The name for the dashboard group. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A list of names specifying dashboards to show links to in a separate tabbed + // bar at the top of the page for each of the given dashboards. 
+ DashboardNames []string `protobuf:"bytes,2,rep,name=dashboard_names,json=dashboardNames,proto3" json:"dashboard_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DashboardGroup) Reset() { *m = DashboardGroup{} } +func (m *DashboardGroup) String() string { return proto.CompactTextString(m) } +func (*DashboardGroup) ProtoMessage() {} +func (*DashboardGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{13} +} + +func (m *DashboardGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DashboardGroup.Unmarshal(m, b) +} +func (m *DashboardGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DashboardGroup.Marshal(b, m, deterministic) +} +func (m *DashboardGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_DashboardGroup.Merge(m, src) +} +func (m *DashboardGroup) XXX_Size() int { + return xxx_messageInfo_DashboardGroup.Size(m) +} +func (m *DashboardGroup) XXX_DiscardUnknown() { + xxx_messageInfo_DashboardGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_DashboardGroup proto.InternalMessageInfo + +func (m *DashboardGroup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DashboardGroup) GetDashboardNames() []string { + if m != nil { + return m.DashboardNames + } + return nil +} + +// A service configuration consisting of multiple test groups and dashboards. +type Configuration struct { + // A list of groups of tests to gather. + TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups,proto3" json:"test_groups,omitempty"` + // A list of all of the dashboards for a server. + Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards,proto3" json:"dashboards,omitempty"` + // A list of all the dashboard groups for a server. + DashboardGroups []*DashboardGroup `protobuf:"bytes,3,rep,name=dashboard_groups,json=dashboardGroups,proto3" json:"dashboard_groups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configuration) Reset() { *m = Configuration{} } +func (m *Configuration) String() string { return proto.CompactTextString(m) } +func (*Configuration) ProtoMessage() {} +func (*Configuration) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{14} +} + +func (m *Configuration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configuration.Unmarshal(m, b) +} +func (m *Configuration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configuration.Marshal(b, m, deterministic) +} +func (m *Configuration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configuration.Merge(m, src) +} +func (m *Configuration) XXX_Size() int { + return xxx_messageInfo_Configuration.Size(m) +} +func (m *Configuration) XXX_DiscardUnknown() { + xxx_messageInfo_Configuration.DiscardUnknown(m) +} + +var xxx_messageInfo_Configuration proto.InternalMessageInfo + +func (m *Configuration) GetTestGroups() []*TestGroup { + if m != nil { + return m.TestGroups + } + return nil +} + +func (m *Configuration) GetDashboards() []*Dashboard { + if m != nil { + return m.Dashboards + } + return nil +} + +func (m *Configuration) GetDashboardGroups() []*DashboardGroup { + if m != nil { + return m.DashboardGroups + } + return nil +} + +// A grouping of configuration options for the flakiness analysis tool. 
+// Later configuration options could include the ability to choose different kinds of +// flakiness, and whether and whom to email a copy of the flakiness report. +type HealthAnalysisOptions struct { + // Defaults to false; flakiness analysis is opt-in + Enable bool `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"` + // Defines the number of days for one interval of analysis. + // i.e. flakiness will be analyzed for the previous N days starting from Now, + // and it will be compared to the calculated N days before that for trend analysis. + DaysOfAnalysis int32 `protobuf:"varint,2,opt,name=days_of_analysis,json=daysOfAnalysis,proto3" json:"days_of_analysis,omitempty"` + // When to send healthiness emails out, using cron string format. + EmailSchedule string `protobuf:"bytes,3,opt,name=email_schedule,json=emailSchedule,proto3" json:"email_schedule,omitempty"` + // A comma-separated list of healthiness email recipients. + EmailRecipients string `protobuf:"bytes,4,opt,name=email_recipients,json=emailRecipients,proto3" json:"email_recipients,omitempty"` + // A compilable regex string for grouping tests by name. + // Works the same as the group-by-regex-mask option of base_options: + // go/testgrid/users/dashboard_guide#grouping-tests + // An empty string means no grouping. + // e.g. test name: "//path/to/test - env", regex: ` - \w+` + // The regex will match " - env" in the above test name and give a group of: + // //path/to/test <- Group Name + // - env <- Group Member + GroupingRegex string `protobuf:"bytes,5,opt,name=grouping_regex,json=groupingRegex,proto3" json:"grouping_regex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthAnalysisOptions) Reset() { *m = HealthAnalysisOptions{} } +func (m *HealthAnalysisOptions) String() string { return proto.CompactTextString(m) } +func (*HealthAnalysisOptions) ProtoMessage() {} +func (*HealthAnalysisOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{15} +} + +func (m *HealthAnalysisOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthAnalysisOptions.Unmarshal(m, b) +} +func (m *HealthAnalysisOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthAnalysisOptions.Marshal(b, m, deterministic) +} +func (m *HealthAnalysisOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthAnalysisOptions.Merge(m, src) +} +func (m *HealthAnalysisOptions) XXX_Size() int { + return xxx_messageInfo_HealthAnalysisOptions.Size(m) +} +func (m *HealthAnalysisOptions) XXX_DiscardUnknown() { + xxx_messageInfo_HealthAnalysisOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthAnalysisOptions proto.InternalMessageInfo + +func (m *HealthAnalysisOptions) GetEnable() bool { + if m != nil { + return m.Enable + } + return false +} + +func (m *HealthAnalysisOptions) GetDaysOfAnalysis() int32 { + if m != nil { + return m.DaysOfAnalysis + } + return 0 +} + +func (m *HealthAnalysisOptions) GetEmailSchedule() string { + if m != nil { + return m.EmailSchedule + } + return "" +} + +func (m *HealthAnalysisOptions) GetEmailRecipients() string { + if m != nil { + return m.EmailRecipients + } + return "" +} + +func (m *HealthAnalysisOptions) GetGroupingRegex() string { + if m != nil { + return m.GroupingRegex + } + return "" +} + +// The DefaultConfiguration Proto is deprecated, and will be deleted after Nov 1, 2019 +// For defaulting behavior, use the yamlcfg library
instead. +type DefaultConfiguration struct { + // A default testgroup with default initialization data + DefaultTestGroup *TestGroup `protobuf:"bytes,1,opt,name=default_test_group,json=defaultTestGroup,proto3" json:"default_test_group,omitempty"` // Deprecated: Do not use. + // A default dashboard tab with default initialization data + DefaultDashboardTab *DashboardTab `protobuf:"bytes,2,opt,name=default_dashboard_tab,json=defaultDashboardTab,proto3" json:"default_dashboard_tab,omitempty"` // Deprecated: Do not use. + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefaultConfiguration) Reset() { *m = DefaultConfiguration{} } +func (m *DefaultConfiguration) String() string { return proto.CompactTextString(m) } +func (*DefaultConfiguration) ProtoMessage() {} +func (*DefaultConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_3eaf2c85e69e9ea4, []int{16} +} + +func (m *DefaultConfiguration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DefaultConfiguration.Unmarshal(m, b) +} +func (m *DefaultConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DefaultConfiguration.Marshal(b, m, deterministic) +} +func (m *DefaultConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefaultConfiguration.Merge(m, src) +} +func (m *DefaultConfiguration) XXX_Size() int { + return xxx_messageInfo_DefaultConfiguration.Size(m) +} +func (m *DefaultConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DefaultConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DefaultConfiguration proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *DefaultConfiguration) GetDefaultTestGroup() *TestGroup { + if m != nil { + return m.DefaultTestGroup + } + return nil +} + +// Deprecated: Do not use. 
+func (m *DefaultConfiguration) GetDefaultDashboardTab() *DashboardTab { + if m != nil { + return m.DefaultDashboardTab + } + return nil +} + +func init() { + proto.RegisterEnum("TestGroup_TestsName", TestGroup_TestsName_name, TestGroup_TestsName_value) + proto.RegisterEnum("TestGroup_FallbackGrouping", TestGroup_FallbackGrouping_name, TestGroup_FallbackGrouping_value) + proto.RegisterEnum("TestGroup_PrimaryGrouping", TestGroup_PrimaryGrouping_name, TestGroup_PrimaryGrouping_value) + proto.RegisterEnum("AutoBugOptions_Priority", AutoBugOptions_Priority_name, AutoBugOptions_Priority_value) + proto.RegisterType((*TestNameConfig)(nil), "TestNameConfig") + proto.RegisterType((*TestNameConfig_NameElement)(nil), "TestNameConfig.NameElement") + proto.RegisterType((*Notification)(nil), "Notification") + proto.RegisterType((*TestGroup)(nil), "TestGroup") + proto.RegisterType((*TestGroup_ColumnHeader)(nil), "TestGroup.ColumnHeader") + proto.RegisterType((*TestGroup_TestAnnotation)(nil), "TestGroup.TestAnnotation") + proto.RegisterType((*TestGroup_KeyValue)(nil), "TestGroup.KeyValue") + proto.RegisterType((*TestGroup_ResultSource)(nil), "TestGroup.ResultSource") + proto.RegisterType((*JUnitConfig)(nil), "JUnitConfig") + proto.RegisterType((*TestMetadataOptions)(nil), "TestMetadataOptions") + proto.RegisterType((*AutoBugOptions)(nil), "AutoBugOptions") + proto.RegisterType((*AutoBugOptions_DefaultTestMetadata)(nil), "AutoBugOptions.DefaultTestMetadata") + proto.RegisterType((*HotlistIdFromSource)(nil), "HotlistIdFromSource") + proto.RegisterType((*Dashboard)(nil), "Dashboard") + proto.RegisterType((*LinkTemplate)(nil), "LinkTemplate") + proto.RegisterType((*LinkOptionsTemplate)(nil), "LinkOptionsTemplate") + proto.RegisterType((*DashboardTab)(nil), "DashboardTab") + proto.RegisterType((*DashboardTabAlertOptions)(nil), "DashboardTabAlertOptions") + proto.RegisterType((*DashboardTabFlakinessAlertOptions)(nil), "DashboardTabFlakinessAlertOptions") + proto.RegisterType((*DashboardGroup)(nil), "DashboardGroup") + proto.RegisterType((*Configuration)(nil), "Configuration") + proto.RegisterType((*HealthAnalysisOptions)(nil), "HealthAnalysisOptions") + proto.RegisterType((*DefaultConfiguration)(nil), "DefaultConfiguration") +} + +func init() { proto.RegisterFile("config.proto", fileDescriptor_3eaf2c85e69e9ea4) } + +var fileDescriptor_3eaf2c85e69e9ea4 = []byte{ + // 3358 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x3a, 0x5d, 0x57, 0x1b, 0x49, + 0x76, 0xd6, 0x07, 0xb6, 0xb8, 0x48, 0xa2, 0x29, 0x81, 0x68, 0x60, 0x9c, 0xc1, 0x9a, 0xf5, 0x0e, + 0x33, 0xb3, 0xcb, 0x8c, 0xf1, 0xcc, 0x66, 0xbc, 0xb6, 0x77, 0x57, 0x80, 0x30, 0x60, 0x3e, 0x94, + 0x46, 0x6c, 0xce, 0xee, 0x4b, 0xa7, 0xd4, 0x5d, 0x92, 0x7a, 0xe8, 0x0f, 0xa5, 0xab, 0xda, 0x86, + 0xb7, 0x3c, 0xe6, 0x3f, 0x64, 0x1f, 0x73, 0xf2, 0x36, 0x7f, 0x23, 0x0f, 0x79, 0xcc, 0x39, 0xf9, + 0x3f, 0x39, 0x75, 0xab, 0xba, 0xd5, 0x42, 0xb2, 0xc7, 0x39, 0x79, 0x42, 0x75, 0xbf, 0xaa, 0xea, + 0x7e, 0xd5, 0xbd, 0xb7, 0x81, 0xaa, 0x13, 0x85, 0x03, 0x6f, 0xb8, 0x3b, 0x8e, 0x23, 0x11, 0x6d, + 0x7e, 0x3d, 0xee, 0x7f, 0xeb, 0x24, 0x5c, 0x44, 0x81, 0xcd, 0xde, 0x51, 0x3f, 0xa1, 0x22, 0x8a, + 0x67, 0x00, 0x8a, 0xb6, 0xf5, 0xb7, 0x22, 0xd4, 0x7b, 0x8c, 0x8b, 0x0b, 0x1a, 0xb0, 0x03, 0x14, + 0x42, 0xfe, 0x04, 0xb5, 0x90, 0x06, 0xcc, 0x66, 0x3e, 0x0b, 0x58, 0x28, 0xb8, 0x59, 0xd8, 0x2e, + 0xed, 0x2c, 0xed, 0x6d, 0xed, 0x4e, 0xd3, 0xed, 0xca, 0x9f, 0x1d, 0x45, 0x63, 0x55, 0xc3, 0xc9, + 0x82, 0x93, 0xcf, 0x61, 0x09, 0x25, 0x0c, 
0xa2, 0x38, 0xa0, 0xc2, 0x2c, 0x6e, 0x17, 0x76, 0x16, + 0x2d, 0x90, 0xa0, 0x23, 0x84, 0x6c, 0xfe, 0x47, 0x01, 0x96, 0x72, 0xec, 0xa4, 0x09, 0x0f, 0x7d, + 0xda, 0x67, 0xbe, 0xdc, 0x4b, 0xd2, 0xea, 0x15, 0xf9, 0x02, 0x6a, 0x82, 0xc6, 0x43, 0x26, 0x6c, + 0x75, 0x41, 0x2d, 0xaa, 0xaa, 0x80, 0xfa, 0xbc, 0x4f, 0xa0, 0xda, 0x4f, 0x3c, 0xdf, 0xb5, 0x15, + 0xd4, 0x2c, 0x6d, 0x17, 0x76, 0x2a, 0xd6, 0x12, 0xc2, 0x7a, 0x08, 0x22, 0x04, 0xca, 0x82, 0x0e, + 0xb9, 0x59, 0x46, 0x76, 0xfc, 0x8d, 0xb2, 0x19, 0x17, 0xf6, 0x38, 0x8e, 0xc6, 0x2c, 0x16, 0x77, + 0xe6, 0x82, 0x96, 0xcd, 0xb8, 0xe8, 0x6a, 0x58, 0xeb, 0x2d, 0x54, 0x2f, 0x22, 0xe1, 0x0d, 0x3c, + 0x87, 0x0a, 0x2f, 0x0a, 0x89, 0x09, 0x8f, 0x78, 0x12, 0x04, 0x34, 0xbe, 0xd3, 0x27, 0x4d, 0x97, + 0xf2, 0x14, 0x4e, 0x14, 0x0a, 0x76, 0x2b, 0x6c, 0xdf, 0x0b, 0x6f, 0xf4, 0x49, 0x97, 0x34, 0xec, + 0xcc, 0x0b, 0x6f, 0x5a, 0x7f, 0xfb, 0x0c, 0x16, 0xa5, 0x0e, 0xdf, 0xc4, 0x51, 0x32, 0x96, 0x67, + 0x92, 0x1a, 0xd1, 0x72, 0xf0, 0x37, 0x79, 0x0c, 0x30, 0x74, 0xb8, 0x3d, 0x8e, 0xd9, 0xc0, 0xbb, + 0xd5, 0x22, 0x16, 0x87, 0x0e, 0xef, 0x22, 0x80, 0xfc, 0x1a, 0x96, 0x5d, 0x7a, 0xc7, 0xed, 0x68, + 0x60, 0xc7, 0x8c, 0x27, 0xbe, 0xe0, 0x78, 0xd9, 0x05, 0xab, 0x26, 0xc1, 0x97, 0x03, 0x4b, 0x01, + 0xc9, 0x53, 0xa8, 0x7b, 0xc3, 0x30, 0x8a, 0x99, 0x3d, 0x66, 0xa1, 0xeb, 0x85, 0x43, 0xbc, 0x78, + 0xc5, 0xaa, 0x29, 0x68, 0x57, 0x01, 0xe5, 0x91, 0x35, 0x99, 0xd4, 0x95, 0x40, 0x05, 0x54, 0xac, + 0x25, 0x05, 0xdb, 0x97, 0x20, 0xf2, 0x27, 0x58, 0x91, 0xfa, 0xe0, 0x36, 0xda, 0x73, 0x1c, 0xf9, + 0x9e, 0x73, 0x67, 0x3e, 0xdc, 0x2e, 0xec, 0xd4, 0xf7, 0x56, 0x77, 0xb3, 0xbb, 0xe0, 0x2f, 0x2e, + 0x0d, 0x6a, 0x2d, 0x8b, 0xf4, 0x67, 0x17, 0x89, 0xc9, 0x1e, 0xac, 0xe9, 0x4d, 0x50, 0xdb, 0x3c, + 0xe9, 0x73, 0x11, 0xcb, 0x23, 0x55, 0xb6, 0x4b, 0x3b, 0x8b, 0x56, 0x43, 0x21, 0xa5, 0x80, 0xab, + 0x14, 0x45, 0x5e, 0x41, 0xcd, 0x89, 0xfc, 0x24, 0x08, 0xed, 0x11, 0xa3, 0x2e, 0x8b, 0xcd, 0x45, + 0xf4, 0xc0, 0xf5, 0xdc, 0x8e, 0x07, 0x88, 0x3f, 0x46, 0xb4, 0x55, 0x75, 0x72, 0x2b, 0x72, 0x0c, + 0x2b, 0x03, 0xea, 0xfb, 0x7d, 0xea, 0xdc, 0xd8, 0x43, 0x49, 0x2c, 0x77, 0x03, 0x3c, 0xf3, 0x56, + 0x4e, 0xc2, 0x91, 0xa6, 0x79, 0xa3, 0x49, 0x2c, 0x63, 0x70, 0x0f, 0x42, 0x5e, 0xc3, 0x06, 0xf5, + 0x59, 0x2c, 0x6c, 0x2e, 0xa8, 0xcf, 0x52, 0x9d, 0xdb, 0xa3, 0x28, 0x89, 0xb9, 0xb9, 0x24, 0x35, + 0xbf, 0x5f, 0x34, 0x0b, 0x56, 0x13, 0x89, 0xae, 0x24, 0x8d, 0xb6, 0xc0, 0xb1, 0xa4, 0x20, 0x3f, + 0xc0, 0x5a, 0x98, 0x04, 0xf6, 0x80, 0x7a, 0x7e, 0x12, 0x33, 0x6e, 0x8b, 0xc8, 0x46, 0x4a, 0xb3, + 0x9a, 0xb1, 0x92, 0x30, 0x09, 0x8e, 0x34, 0xbe, 0x17, 0xb5, 0x25, 0x56, 0x3a, 0x66, 0x3f, 0x19, + 0xda, 0x4e, 0x14, 0x8c, 0xa3, 0x90, 0x85, 0xc2, 0xac, 0xa1, 0x8d, 0xab, 0xfd, 0x64, 0x78, 0x90, + 0xc2, 0xc8, 0x0e, 0x18, 0x4e, 0xe4, 0x32, 0x9b, 0x33, 0x1a, 0x3b, 0x23, 0x7b, 0x4c, 0xc5, 0xc8, + 0xac, 0xa3, 0xbf, 0xd4, 0x25, 0xfc, 0x0a, 0xc1, 0x5d, 0x2a, 0x46, 0xe4, 0x37, 0x20, 0x37, 0xb1, + 0x95, 0x8a, 0xb8, 0x1d, 0x33, 0x47, 0xca, 0x5c, 0x46, 0x99, 0x46, 0x98, 0x04, 0x4a, 0x93, 0xdc, + 0x42, 0x38, 0xf9, 0x1a, 0x56, 0x12, 0xae, 0x6d, 0x15, 0x30, 0x41, 0x5d, 0x2a, 0xa8, 0x69, 0xa0, + 0x63, 0x2c, 0x27, 0x1c, 0xed, 0x74, 0xae, 0xc1, 0xe4, 0x05, 0xac, 0x2b, 0xf5, 0x04, 0xd4, 0xf3, + 0xf1, 0x76, 0xae, 0x1b, 0x33, 0xce, 0x19, 0x37, 0x57, 0xe4, 0x51, 0xf0, 0x86, 0xab, 0x48, 0x72, + 0x4e, 0x3d, 0xbf, 0x17, 0xb5, 0x53, 0x3c, 0xf9, 0x0e, 0x48, 0x8e, 0x95, 0x27, 0xfd, 0x9f, 0x98, + 0x23, 0x4c, 0x92, 0x71, 0x19, 0x19, 0xd7, 0x95, 0xc2, 0x91, 0x3f, 0xc2, 0x66, 0x8e, 0x43, 0xeb, + 0xd4, 0x0e, 0x18, 0xe7, 0x74, 0xc8, 0xcc, 0x46, 0xc6, 0xb9, 0x9e, 
0x71, 0x6a, 0xbd, 0x9e, 0x2b, + 0x12, 0xf2, 0x1c, 0x56, 0x73, 0x02, 0x5c, 0x26, 0x75, 0x9c, 0xc4, 0xbe, 0xb9, 0x9a, 0xb1, 0xae, + 0x64, 0xac, 0x87, 0x12, 0x7b, 0x1d, 0xfb, 0xe4, 0x0c, 0x9e, 0x04, 0x5e, 0x68, 0x33, 0x9f, 0x8e, + 0x39, 0x73, 0xed, 0xc0, 0x0b, 0x13, 0xc1, 0xb8, 0xdd, 0x67, 0xe2, 0x3d, 0x63, 0x21, 0x8a, 0xe2, + 0xe6, 0x5a, 0x66, 0xce, 0xc7, 0x81, 0x17, 0x76, 0x14, 0xed, 0xb9, 0x22, 0xdd, 0x57, 0x94, 0x52, + 0x28, 0x27, 0xbb, 0xd0, 0x60, 0x21, 0xed, 0xfb, 0xcc, 0x1e, 0xf8, 0xf4, 0xe6, 0x4e, 0xba, 0x95, + 0x48, 0xb8, 0xb9, 0x8e, 0xea, 0x5d, 0x51, 0xa8, 0x23, 0x89, 0xb9, 0x42, 0x84, 0x8c, 0x1d, 0xd7, + 0xe3, 0xc8, 0x10, 0xb0, 0x78, 0xc8, 0xdc, 0x94, 0xe3, 0x15, 0x72, 0x34, 0x34, 0xf2, 0x1c, 0x71, + 0x13, 0x1e, 0x69, 0xc0, 0x9b, 0xa4, 0xcf, 0xe2, 0x90, 0xc9, 0xc3, 0x3a, 0xbe, 0x27, 0x2d, 0x6e, + 0x2a, 0x9e, 0x84, 0xb3, 0xb7, 0x19, 0xee, 0x00, 0x51, 0xe4, 0x47, 0x30, 0xd3, 0x7d, 0xc6, 0x71, + 0xf4, 0xfe, 0xa7, 0xa8, 0x6f, 0xd3, 0x90, 0xfa, 0x77, 0xdc, 0xe3, 0xe6, 0x1f, 0x90, 0xad, 0xa9, + 0xf1, 0x5d, 0x85, 0x6e, 0x6b, 0xac, 0xcc, 0xf4, 0x1e, 0xb7, 0xd9, 0xad, 0x60, 0x71, 0x48, 0x7d, + 0x73, 0x03, 0x89, 0xc1, 0xe3, 0x1d, 0x0d, 0x21, 0x2f, 0xc0, 0x40, 0x5f, 0xc2, 0xfc, 0xa1, 0x93, + 0xf8, 0xe6, 0x76, 0x61, 0x67, 0x69, 0x6f, 0xf9, 0xde, 0x7b, 0x62, 0xd5, 0xc5, 0xf4, 0x3b, 0xf4, + 0x1c, 0x6a, 0x61, 0x2e, 0xf7, 0x72, 0x73, 0x0b, 0xb3, 0x40, 0x6d, 0x37, 0x9f, 0x91, 0xad, 0x69, + 0x1a, 0xd2, 0x01, 0x63, 0x1c, 0x7b, 0x32, 0x23, 0x4f, 0x62, 0xff, 0x31, 0xc6, 0xfe, 0x66, 0x2e, + 0xf6, 0xbb, 0x8a, 0x24, 0x0b, 0xfd, 0xe5, 0xf1, 0x34, 0x20, 0x67, 0xa9, 0x34, 0x12, 0x46, 0x91, + 0xcb, 0xcd, 0xbf, 0xcb, 0x5b, 0x4a, 0xc7, 0x82, 0x44, 0x90, 0x43, 0x7d, 0x4d, 0x1a, 0x86, 0x91, + 0xd0, 0xc7, 0xfd, 0x1c, 0x8f, 0xbb, 0x71, 0x2f, 0x4d, 0xb6, 0x33, 0x0a, 0x95, 0x2b, 0x27, 0x6b, + 0x4e, 0x7e, 0x84, 0x8d, 0x80, 0xde, 0x4e, 0x6d, 0x69, 0x8f, 0x59, 0x8c, 0x00, 0x73, 0x1b, 0x23, + 0x76, 0x2d, 0xa0, 0xb7, 0xb9, 0x8d, 0xbb, 0x2c, 0x96, 0x2b, 0x72, 0x0c, 0x6b, 0x53, 0x21, 0x6b, + 0x47, 0x63, 0x75, 0x88, 0x16, 0x1e, 0x42, 0xe5, 0xea, 0x34, 0x70, 0x2f, 0x15, 0xce, 0x6a, 0x88, + 0x59, 0xa0, 0x4c, 0x2c, 0x28, 0x49, 0xd0, 0xa1, 0xcc, 0x2a, 0xd2, 0x8c, 0xe6, 0x17, 0x2a, 0xb1, + 0x48, 0x78, 0x8f, 0x0e, 0xbb, 0x0a, 0x2a, 0x4d, 0x4b, 0x13, 0x11, 0xd9, 0x32, 0x90, 0xd2, 0xed, + 0x7e, 0xa5, 0x4d, 0xdb, 0x4e, 0x44, 0xb4, 0x9f, 0x0c, 0xd3, 0x9d, 0xea, 0x74, 0x6a, 0x4d, 0x9e, + 0x43, 0x33, 0xbb, 0x68, 0x9c, 0x84, 0xc2, 0x0b, 0x98, 0xce, 0xaa, 0x4f, 0xf1, 0x96, 0x0d, 0x7d, + 0x4b, 0x4b, 0xe1, 0x54, 0x3a, 0x7d, 0x05, 0x5b, 0x32, 0x91, 0x8d, 0xa9, 0xcc, 0x20, 0x32, 0xdd, + 0xa4, 0x3e, 0xab, 0x92, 0xea, 0xaf, 0x91, 0x73, 0x3d, 0x4c, 0x82, 0x2e, 0x52, 0xf4, 0xa2, 0x43, + 0x85, 0x57, 0x59, 0xf5, 0x1b, 0x20, 0xf2, 0x5d, 0x96, 0xa7, 0xe5, 0x76, 0x5f, 0x7b, 0x87, 0xf9, + 0xa5, 0xca, 0x6c, 0x12, 0xb3, 0x9f, 0x0c, 0xf9, 0xbe, 0xf2, 0x00, 0x72, 0x02, 0xcd, 0x9c, 0x11, + 0xd2, 0x12, 0xc1, 0x63, 0xdc, 0xfc, 0x0a, 0xf5, 0xd9, 0xc8, 0x19, 0xf5, 0x2d, 0xbb, 0xfb, 0x33, + 0xf5, 0x13, 0x66, 0xad, 0x8a, 0xcc, 0x2e, 0xdd, 0x8c, 0x41, 0x46, 0xc8, 0x90, 0x8a, 0x11, 0x8b, + 0x71, 0x67, 0xf3, 0x6b, 0x15, 0x21, 0x0a, 0x24, 0xb7, 0x94, 0x19, 0x97, 0x8f, 0xa2, 0x58, 0xd8, + 0x58, 0x3b, 0x04, 0x4c, 0xc4, 0x9e, 0x63, 0x7e, 0x83, 0x1a, 0x5f, 0x46, 0x44, 0x8f, 0xdd, 0x4a, + 0xb1, 0xb1, 0xe7, 0x48, 0x07, 0x99, 0xba, 0xc4, 0x94, 0x73, 0xfe, 0x16, 0x45, 0xaf, 0x4d, 0xee, + 0x92, 0x77, 0xd0, 0x1f, 0x60, 0x3d, 0x7f, 0xa3, 0x80, 0x0a, 0x67, 0x64, 0xc7, 0x6c, 0xc8, 0x6e, + 0xcd, 0x5d, 0xdc, 0x2b, 0x77, 0xfa, 0x73, 0x89, 0xb4, 0x24, 0x8e, 0xbc, 0x80, 0x8d, 0x3c, 
0x5b, + 0x12, 0xe6, 0x19, 0x5f, 0x23, 0x63, 0x73, 0xc2, 0x78, 0xad, 0xd0, 0x8a, 0xf5, 0x99, 0x4a, 0x44, + 0x83, 0xc4, 0xf7, 0x53, 0x76, 0x99, 0x04, 0xb8, 0xf9, 0x2d, 0x9e, 0x93, 0x24, 0x9c, 0x1d, 0x25, + 0xbe, 0xaf, 0x38, 0x65, 0xd8, 0x73, 0xf2, 0x0f, 0xf0, 0x74, 0xe6, 0xe5, 0xd6, 0x49, 0x23, 0x89, + 0x31, 0x46, 0x6c, 0x59, 0xbe, 0x32, 0xf3, 0x19, 0xee, 0xdc, 0xba, 0xff, 0x60, 0x1f, 0xe4, 0x49, + 0xd1, 0x28, 0xb2, 0x94, 0x50, 0xcf, 0xb6, 0xcd, 0xa3, 0x24, 0x76, 0x98, 0xb9, 0x87, 0x1e, 0x9a, + 0x2f, 0x25, 0xd4, 0x9b, 0x7d, 0x85, 0x68, 0xab, 0x1a, 0xe7, 0x56, 0xe4, 0x00, 0x36, 0xee, 0xd7, + 0xcd, 0x76, 0x9c, 0xf8, 0xf2, 0xd9, 0x15, 0xe6, 0x73, 0x94, 0x54, 0xd9, 0xb5, 0x12, 0x9f, 0x5d, + 0x31, 0x61, 0x35, 0x15, 0x69, 0x27, 0xa5, 0xd4, 0x70, 0xa9, 0xfa, 0x98, 0x51, 0x95, 0xbb, 0x99, + 0x3d, 0x88, 0xa3, 0xc0, 0xe6, 0x22, 0x8a, 0xe5, 0xb3, 0xf5, 0x3d, 0xaa, 0x62, 0x55, 0xa2, 0x65, + 0xfa, 0x66, 0x47, 0x71, 0x14, 0x5c, 0x29, 0x9c, 0x7c, 0xb7, 0x75, 0xe1, 0x14, 0xf9, 0x6e, 0x56, + 0xef, 0xfd, 0x80, 0x1c, 0x86, 0xc2, 0x5c, 0xfa, 0x6e, 0x5a, 0xf2, 0xc9, 0x44, 0xac, 0xa8, 0xf9, + 0x8d, 0x37, 0x36, 0x7f, 0xa7, 0x13, 0x31, 0x82, 0xae, 0x6e, 0xbc, 0x31, 0xf9, 0x1d, 0xac, 0xab, + 0x2a, 0x39, 0x7a, 0xc7, 0xe2, 0xd8, 0x93, 0xa5, 0x83, 0x88, 0x07, 0x32, 0xba, 0xcc, 0xbf, 0x47, + 0x6d, 0xae, 0x21, 0xfa, 0x52, 0x63, 0xaf, 0x34, 0x52, 0x56, 0x23, 0x09, 0x67, 0xf1, 0xa4, 0x4c, + 0xfe, 0x51, 0x95, 0xc9, 0x12, 0x98, 0x96, 0xc9, 0x9b, 0xff, 0x0c, 0xd5, 0x7c, 0x41, 0x46, 0x56, + 0x61, 0x01, 0x2b, 0x78, 0x5d, 0xdc, 0xaa, 0x05, 0xd9, 0x84, 0x4a, 0x26, 0x45, 0xd5, 0xb6, 0xd9, + 0x9a, 0x7c, 0x0b, 0x8d, 0x79, 0x86, 0x2e, 0x21, 0x19, 0x71, 0x66, 0x0c, 0xbb, 0xc9, 0x55, 0xdf, + 0x32, 0x49, 0x9f, 0xb2, 0x78, 0x9e, 0x04, 0x92, 0xde, 0x79, 0x31, 0x8b, 0x20, 0xf2, 0x14, 0x6a, + 0xe9, 0x6e, 0xe8, 0x88, 0xea, 0x08, 0xc7, 0x0f, 0xac, 0x6a, 0x0a, 0x96, 0x4e, 0xb8, 0xbf, 0x05, + 0x1b, 0x53, 0xe1, 0x88, 0xc5, 0x83, 0x76, 0x9e, 0xcd, 0x3d, 0xa8, 0xa4, 0xe1, 0x4e, 0x0c, 0x28, + 0xdd, 0xb0, 0xb4, 0x0d, 0x90, 0x3f, 0xe5, 0xad, 0xd5, 0xa9, 0xd5, 0xe5, 0xd4, 0x62, 0xf3, 0x06, + 0xaa, 0x79, 0x0f, 0x23, 0xcf, 0xa0, 0xfa, 0x53, 0x12, 0x7a, 0x53, 0x2d, 0xcd, 0xd2, 0x5e, 0x75, + 0xf7, 0xf4, 0x3a, 0xf4, 0x74, 0x4b, 0x73, 0xfc, 0xc0, 0x5a, 0x42, 0x1a, 0xb5, 0xdc, 0x6f, 0xc2, + 0xea, 0x94, 0x13, 0x6b, 0xd6, 0xd3, 0x72, 0xa5, 0x60, 0x14, 0x4f, 0xcb, 0x95, 0x92, 0x51, 0x3e, + 0x2d, 0x57, 0xca, 0xc6, 0x42, 0x2b, 0x50, 0x1d, 0x06, 0x16, 0xe0, 0x64, 0x13, 0x9a, 0xbd, 0xce, + 0x55, 0xef, 0xca, 0xbe, 0x68, 0x9f, 0x77, 0xec, 0xeb, 0x8b, 0xab, 0x6e, 0xe7, 0xe0, 0xe4, 0xe8, + 0xa4, 0x73, 0x68, 0x3c, 0x20, 0x6b, 0xb0, 0x92, 0xc3, 0x9d, 0xbc, 0xb9, 0xb8, 0xb4, 0x3a, 0x46, + 0x81, 0x34, 0x81, 0xe4, 0xc0, 0x56, 0xa7, 0x7b, 0xd6, 0x3e, 0xe8, 0x18, 0xc5, 0x7b, 0xe4, 0xed, + 0x6e, 0xb7, 0x73, 0x71, 0x68, 0x94, 0x5a, 0xff, 0x55, 0x00, 0xe3, 0x7e, 0x1d, 0x2d, 0xb7, 0x3d, + 0x6a, 0x9f, 0x9d, 0xed, 0xb7, 0x0f, 0xde, 0xda, 0x6f, 0xac, 0xcb, 0xeb, 0xee, 0xc9, 0xc5, 0x1b, + 0xfb, 0xe2, 0xf2, 0xa2, 0x63, 0x3c, 0x98, 0x8f, 0x3b, 0x6c, 0xf7, 0xe4, 0xde, 0x9f, 0x81, 0x39, + 0x8b, 0x3b, 0x6b, 0xef, 0x77, 0xce, 0xae, 0x8c, 0x22, 0x31, 0x61, 0x75, 0x16, 0x7b, 0x72, 0x68, + 0x94, 0xc8, 0x16, 0xac, 0xcf, 0x62, 0xf6, 0xaf, 0x4f, 0xce, 0x0e, 0x8d, 0x32, 0xf9, 0x0a, 0x9e, + 0xce, 0x22, 0x0f, 0x2e, 0x2f, 0x8e, 0x4e, 0xde, 0x5c, 0x5b, 0xed, 0xde, 0xc9, 0xe5, 0x85, 0xfd, + 0xe7, 0xf6, 0xd9, 0x75, 0xc7, 0x58, 0x68, 0x1d, 0xc3, 0xf2, 0xbd, 0xba, 0x80, 0x6c, 0xc0, 0x5a, + 0xd7, 0x3a, 0x39, 0x6f, 0x5b, 0x7f, 0x99, 0x77, 0x93, 0x19, 0x94, 0xda, 0xb4, 0x70, 0x5a, 0xae, + 0x3c, 0x32, 0x2a, 
0xa7, 0xe5, 0x4a, 0xd3, 0x58, 0x3f, 0x2d, 0x57, 0x3e, 0x33, 0x1e, 0x9f, 0x96, + 0x2b, 0x4f, 0x8c, 0xd6, 0x69, 0xb9, 0xb2, 0x63, 0x7c, 0x75, 0x5a, 0xae, 0xfc, 0xc6, 0xf8, 0xed, + 0x69, 0xb9, 0xf2, 0x9d, 0xf1, 0xec, 0xb4, 0x5c, 0xf9, 0xbd, 0xf1, 0xf2, 0xb4, 0x5c, 0x79, 0x69, + 0xbc, 0x6a, 0xd5, 0x60, 0x29, 0xe7, 0x03, 0xad, 0x9f, 0x0b, 0xd0, 0x98, 0xf3, 0x6a, 0xcb, 0x26, + 0x70, 0x52, 0x51, 0xa9, 0x44, 0xac, 0x7c, 0xb0, 0x96, 0xd6, 0x4f, 0x2a, 0xff, 0xce, 0xb4, 0x11, + 0xc5, 0x39, 0x6d, 0xc4, 0x2a, 0x2c, 0x44, 0xef, 0x43, 0x16, 0xeb, 0x40, 0x53, 0x0b, 0x52, 0x87, + 0xa2, 0xe3, 0x98, 0x65, 0x6c, 0xd0, 0x8a, 0x8e, 0x23, 0x45, 0xa5, 0x81, 0xa0, 0x36, 0xd4, 0xad, + 0xb2, 0x06, 0xe2, 0x7e, 0xad, 0x7f, 0x79, 0x08, 0xf5, 0xe9, 0x67, 0x9f, 0x7c, 0x0f, 0xcd, 0x3e, + 0x13, 0xd4, 0x96, 0xaf, 0xff, 0xf4, 0x59, 0x00, 0xcf, 0xb2, 0x2a, 0xb1, 0x6d, 0x85, 0x9c, 0x9c, + 0xe9, 0x31, 0x00, 0xd6, 0x15, 0x8e, 0x1f, 0x71, 0xd5, 0x1e, 0x57, 0xac, 0x45, 0x09, 0x39, 0x90, + 0x00, 0x99, 0xe9, 0x46, 0x91, 0xf0, 0x3d, 0x2e, 0x6c, 0xcf, 0xe5, 0x66, 0x71, 0xbb, 0xb4, 0x53, + 0xb2, 0x40, 0x83, 0x4e, 0x5c, 0xb9, 0x6b, 0x65, 0x1c, 0x7b, 0x51, 0xec, 0x89, 0x3b, 0xbc, 0x56, + 0x7d, 0xcf, 0xbc, 0x57, 0x8f, 0xc8, 0xfa, 0x0f, 0xf1, 0x56, 0x46, 0x49, 0xde, 0xc2, 0x7a, 0x4e, + 0xac, 0x4e, 0xd3, 0xea, 0xc9, 0x28, 0xeb, 0x1a, 0xea, 0x38, 0xdd, 0x03, 0xd3, 0xb4, 0x7a, 0x2f, + 0x56, 0x27, 0x1b, 0x4f, 0xa0, 0xe4, 0x4b, 0x58, 0x1e, 0x78, 0x3e, 0xb3, 0xbd, 0xd0, 0xf5, 0xde, + 0x79, 0x6e, 0x42, 0x7d, 0xdd, 0x5c, 0xd7, 0x25, 0xf8, 0x24, 0x83, 0x92, 0x6f, 0x60, 0x85, 0x7b, + 0xe1, 0xd0, 0x67, 0x22, 0x0a, 0x53, 0x35, 0x61, 0x7f, 0x5d, 0xb1, 0x8c, 0x0c, 0xa1, 0x35, 0x44, + 0x5e, 0xc3, 0x96, 0xac, 0x9a, 0xa8, 0xef, 0x47, 0xef, 0x99, 0x9b, 0x13, 0xae, 0x4a, 0x8b, 0x47, + 0xa8, 0x53, 0x33, 0xa0, 0xb7, 0x6d, 0x45, 0x31, 0xd9, 0x07, 0x0b, 0x8d, 0x27, 0x50, 0xc5, 0x43, + 0xc9, 0x07, 0x80, 0xfa, 0xbe, 0x59, 0x51, 0xed, 0xbe, 0x84, 0x5d, 0x2a, 0x10, 0xf9, 0x47, 0x58, + 0x73, 0xd9, 0x80, 0xca, 0x4c, 0x33, 0xdd, 0x01, 0x2e, 0x62, 0x92, 0xfa, 0xe2, 0xbe, 0x1e, 0x0f, + 0x15, 0x71, 0xde, 0x4d, 0xad, 0x86, 0x3b, 0x0b, 0x94, 0x9e, 0x40, 0xdd, 0x77, 0x34, 0x74, 0x98, + 0x7b, 0x4f, 0xf2, 0x92, 0x7a, 0x02, 0x53, 0x6c, 0x9e, 0x6b, 0xf3, 0x9f, 0xa0, 0x31, 0x67, 0x87, + 0x59, 0xcf, 0x2e, 0x7c, 0xcc, 0xb3, 0x8b, 0xb3, 0x9e, 0xad, 0x9c, 0xbd, 0xe8, 0x38, 0xad, 0x33, + 0xa8, 0xa4, 0xbe, 0x20, 0x33, 0x4c, 0xd7, 0x3a, 0xb9, 0xb4, 0x4e, 0x7a, 0x7f, 0xb9, 0x97, 0x2c, + 0x1f, 0x42, 0xb1, 0xfb, 0x9d, 0x51, 0xc0, 0xbf, 0xcf, 0x8c, 0x22, 0xfe, 0xdd, 0x33, 0x4a, 0xf8, + 0xf7, 0xb9, 0x51, 0xc6, 0xbf, 0xdf, 0x1b, 0x0b, 0xad, 0xbf, 0x42, 0x63, 0x8e, 0x8f, 0x90, 0x66, + 0xfa, 0x2e, 0xc8, 0x73, 0x96, 0x8e, 0x1f, 0xe8, 0x97, 0x41, 0xc2, 0xd5, 0x2b, 0x99, 0xbe, 0x44, + 0x6a, 0xb9, 0xdf, 0x80, 0x95, 0x89, 0x2b, 0x6a, 0x27, 0x6c, 0xfd, 0x67, 0x11, 0x16, 0x0f, 0x29, + 0x1f, 0xf5, 0x23, 0x1a, 0xbb, 0x64, 0x0f, 0x6a, 0x6e, 0xba, 0xb0, 0x05, 0xed, 0xeb, 0x19, 0x5d, + 0x6d, 0x37, 0x23, 0xe9, 0xd1, 0xbe, 0x55, 0x75, 0x73, 0xab, 0x6c, 0xe0, 0x54, 0xcc, 0x0d, 0x9c, + 0x66, 0x7a, 0xac, 0xd2, 0x27, 0xf4, 0x58, 0x9f, 0xc3, 0x52, 0xe6, 0x25, 0xb4, 0xaf, 0x93, 0x01, + 0xa4, 0x66, 0xa7, 0x7d, 0xec, 0x5b, 0xa3, 0xf7, 0xe1, 0xd8, 0xa7, 0x77, 0xd8, 0xa9, 0xcb, 0x32, + 0x4e, 0xd0, 0x3e, 0xd7, 0x2e, 0xd7, 0x48, 0x91, 0x47, 0x0a, 0xd7, 0xa3, 0x7d, 0xd9, 0xfb, 0x34, + 0x47, 0xde, 0x70, 0xe4, 0x7b, 0xc3, 0x91, 0x98, 0x66, 0xc2, 0x70, 0x50, 0xb3, 0x84, 0x8c, 0x22, + 0xcf, 0xf9, 0x25, 0x2c, 0x4f, 0x38, 0x45, 0xe4, 0xd2, 0x3b, 0x0c, 0x85, 0x8a, 0x55, 0xcf, 0xc0, + 0x3d, 0x09, 0xd5, 0x4f, 0xa4, 0x0b, 0xd5, 
0x33, 0x2f, 0xbc, 0xe9, 0xb1, 0x60, 0xec, 0x53, 0x81, + 0xef, 0x78, 0x12, 0xa7, 0x95, 0x8a, 0xfc, 0x49, 0x76, 0xe1, 0x51, 0xda, 0xcf, 0x14, 0x75, 0xe8, + 0x4b, 0x0e, 0xed, 0xf4, 0x29, 0xa3, 0x95, 0x12, 0x65, 0x8a, 0x2d, 0x4d, 0x14, 0xdb, 0x7a, 0x0d, + 0x8d, 0x39, 0x3c, 0x9f, 0x5a, 0x34, 0xb4, 0xfe, 0x15, 0xa0, 0x7a, 0x38, 0xcf, 0x78, 0xf9, 0x69, + 0x61, 0xfa, 0x12, 0x60, 0xa9, 0x9c, 0xab, 0x69, 0xd4, 0x4b, 0x80, 0x8f, 0x18, 0xd6, 0x01, 0x33, + 0xf1, 0x52, 0xfa, 0xc4, 0x81, 0x52, 0xf9, 0xff, 0x30, 0x50, 0x5a, 0xf8, 0xc0, 0x40, 0xe9, 0x09, + 0x54, 0xfb, 0x94, 0xb3, 0xac, 0x43, 0x7c, 0xa8, 0xe6, 0xa2, 0x12, 0x96, 0x3e, 0x13, 0x2f, 0x81, + 0x44, 0x63, 0x16, 0xaa, 0xc4, 0x20, 0xb4, 0xaa, 0xd0, 0x86, 0xd2, 0x13, 0xf3, 0xc6, 0xb2, 0x0c, + 0x49, 0x28, 0x93, 0x41, 0xa6, 0xd1, 0x17, 0xb0, 0x82, 0x59, 0x4d, 0xde, 0x30, 0xe3, 0xad, 0xcc, + 0xe3, 0xc5, 0x94, 0xbc, 0x9f, 0x0c, 0x33, 0xd6, 0xd7, 0xd0, 0xa0, 0x42, 0x50, 0x67, 0x34, 0xcd, + 0xbc, 0x38, 0x8f, 0x79, 0x45, 0x51, 0xe6, 0xd9, 0x9f, 0x40, 0x35, 0x9d, 0x08, 0x62, 0xc5, 0x09, + 0xea, 0x66, 0x1a, 0x86, 0x35, 0xe7, 0x1f, 0xd3, 0xc2, 0x8d, 0xdb, 0x49, 0xec, 0x4f, 0xb6, 0x58, + 0x9a, 0xb7, 0x05, 0xd1, 0xa4, 0xd7, 0xb1, 0x9f, 0xed, 0x71, 0x04, 0x66, 0xde, 0x2a, 0x53, 0x42, + 0xaa, 0xf3, 0x84, 0xac, 0x4d, 0x8c, 0x95, 0x97, 0xb3, 0x2d, 0x43, 0x96, 0x3b, 0xb1, 0x87, 0x2a, + 0xc7, 0x89, 0xe2, 0xa2, 0x95, 0x07, 0x91, 0x5d, 0x68, 0x08, 0xda, 0x4f, 0x7c, 0x1a, 0xab, 0x36, + 0x4d, 0xbf, 0xf4, 0x6a, 0xa6, 0xb8, 0xa2, 0x51, 0xd8, 0xa6, 0xa9, 0xf2, 0xe2, 0x0f, 0x50, 0x53, + 0xe3, 0xb4, 0xd4, 0xb0, 0xcb, 0x78, 0x9c, 0x8d, 0xa9, 0x0c, 0x84, 0xad, 0x77, 0x3a, 0x04, 0xa8, + 0xd2, 0xdc, 0x8a, 0xfc, 0x15, 0xd6, 0x07, 0x3e, 0xbd, 0xf1, 0x42, 0xc6, 0xb9, 0x3d, 0x2d, 0xc9, + 0x44, 0x49, 0xad, 0x29, 0x49, 0x47, 0x29, 0xed, 0x94, 0xc8, 0xb5, 0xc1, 0x3c, 0xb0, 0xbc, 0x0b, + 0xed, 0x47, 0x89, 0xb0, 0x27, 0x39, 0x52, 0x86, 0xb8, 0xa1, 0xee, 0x82, 0xa8, 0x4c, 0xf6, 0x75, + 0xec, 0x4b, 0x1f, 0x42, 0x07, 0x9c, 0x72, 0x83, 0x95, 0xb9, 0x3e, 0x24, 0xe9, 0xf2, 0x4e, 0xf0, + 0x2b, 0xc0, 0xd9, 0x86, 0x9d, 0xfa, 0x20, 0xc7, 0x21, 0x66, 0xc5, 0xaa, 0x4a, 0xe8, 0x91, 0x72, + 0x38, 0x2e, 0x43, 0xc6, 0xf5, 0x38, 0xe6, 0x43, 0x3f, 0x72, 0xa8, 0x6f, 0x63, 0xdf, 0xd5, 0x50, + 0xef, 0xbc, 0xc6, 0x9c, 0x49, 0x44, 0x4f, 0xb6, 0x5c, 0x6d, 0x58, 0x4b, 0x3f, 0x25, 0x04, 0x2c, + 0x4c, 0x26, 0x47, 0x5a, 0x9d, 0x77, 0xa4, 0x86, 0xa6, 0x3d, 0x67, 0x61, 0x92, 0x1d, 0x4b, 0x76, + 0x7b, 0x71, 0x74, 0xc3, 0x42, 0x1d, 0xa6, 0xb6, 0x18, 0xc5, 0x8c, 0x8f, 0x22, 0xdf, 0xc5, 0x69, + 0x65, 0xd1, 0x5a, 0x53, 0x68, 0x15, 0xab, 0xbd, 0x14, 0x49, 0xda, 0xb0, 0x3a, 0x55, 0xb1, 0xa5, + 0x26, 0x69, 0xce, 0x9f, 0xeb, 0x90, 0x5c, 0x01, 0x97, 0x2a, 0xff, 0x02, 0xd6, 0x47, 0x8c, 0xfa, + 0x62, 0x94, 0xcd, 0x10, 0x33, 0x29, 0xeb, 0x28, 0xa5, 0xb9, 0x7b, 0x8c, 0xf8, 0x74, 0x88, 0x98, + 0x19, 0x73, 0x34, 0x0f, 0xdc, 0xfa, 0x9f, 0x12, 0x98, 0x1f, 0xf2, 0x29, 0xf2, 0xe2, 0x63, 0x13, + 0x7a, 0x55, 0x16, 0x7c, 0x68, 0x3a, 0xff, 0xec, 0x43, 0xd3, 0x79, 0x55, 0x27, 0xcf, 0x9b, 0xcc, + 0xff, 0xf0, 0xe1, 0x81, 0xb7, 0xca, 0xfd, 0xf3, 0x87, 0xdd, 0xbf, 0x30, 0xb8, 0x2a, 0x7f, 0x7c, + 0x70, 0x85, 0x9f, 0x9c, 0xd4, 0x7c, 0x7c, 0x21, 0xfd, 0xe4, 0xa4, 0x46, 0xe2, 0x5b, 0xb0, 0x38, + 0x19, 0x63, 0xab, 0xbc, 0x5a, 0x71, 0xd3, 0xc9, 0xf5, 0x17, 0x50, 0x53, 0xc8, 0x74, 0x44, 0xfe, + 0x48, 0xd5, 0xec, 0x08, 0x4c, 0x67, 0xe2, 0xaf, 0x61, 0xeb, 0x3d, 0xf5, 0xc4, 0xcc, 0x5c, 0x9b, + 0xa9, 0xc1, 0x76, 0x45, 0x55, 0x94, 0x92, 0x64, 0x7a, 0x9c, 0xdd, 0x41, 0x3c, 0x79, 0xf9, 0xd1, + 0x99, 0xfc, 0x22, 0x6e, 0xf8, 0xa1, 0x79, 0x7c, 0xeb, 0xe7, 0x22, 
0x3c, 0xf9, 0xc5, 0x08, 0x97, + 0x5b, 0x04, 0x5e, 0xe8, 0x05, 0xd2, 0x52, 0x59, 0xba, 0xc8, 0x4c, 0x55, 0x40, 0x5f, 0x5e, 0xd7, + 0x14, 0x99, 0x84, 0x4f, 0xb0, 0x57, 0xf1, 0x23, 0xf6, 0xca, 0x69, 0xbc, 0x34, 0xad, 0xf1, 0x5f, + 0xd0, 0x57, 0xf9, 0xff, 0xa5, 0xaf, 0x85, 0x8f, 0xeb, 0xeb, 0x1c, 0xea, 0x99, 0xba, 0x3e, 0xfc, + 0x05, 0xf1, 0x4b, 0x58, 0x9e, 0x24, 0x3d, 0x35, 0x6f, 0x2b, 0x62, 0x1f, 0x57, 0xcf, 0xc0, 0x98, + 0xc4, 0x5b, 0xff, 0x5e, 0x80, 0xda, 0xd4, 0xbc, 0x8c, 0x7c, 0x03, 0x4b, 0x93, 0x72, 0x22, 0xfd, + 0xea, 0x0b, 0x93, 0x41, 0x99, 0x05, 0x59, 0x59, 0xc1, 0xc9, 0xd7, 0x00, 0x99, 0xc0, 0xb4, 0x4c, + 0x82, 0x49, 0xc6, 0xb6, 0x72, 0x58, 0xf2, 0x7b, 0x30, 0x26, 0x67, 0xd2, 0xd2, 0x55, 0x9d, 0xb9, + 0xbc, 0x3b, 0x7d, 0x25, 0x6b, 0x72, 0x78, 0xb5, 0x4f, 0xeb, 0xbf, 0x0b, 0xb0, 0x36, 0x37, 0x5d, + 0x90, 0x26, 0x3c, 0x54, 0x73, 0x78, 0xdd, 0x22, 0xea, 0x95, 0x2c, 0x64, 0xd2, 0x8f, 0xa4, 0xd9, + 0x47, 0x0c, 0x15, 0xd2, 0x75, 0xf5, 0x95, 0x34, 0xfb, 0x78, 0xf1, 0x14, 0xea, 0x4c, 0x7d, 0x7f, + 0x72, 0x46, 0xcc, 0x4d, 0xfc, 0xb4, 0x82, 0xab, 0x21, 0xf4, 0x4a, 0x03, 0xc9, 0x57, 0x60, 0x28, + 0xb2, 0x98, 0x39, 0xde, 0xd8, 0xc3, 0x4f, 0xe2, 0xaa, 0x32, 0x5a, 0x46, 0xb8, 0x95, 0x81, 0xa5, + 0xc4, 0x6c, 0x6e, 0x99, 0xef, 0x94, 0x6b, 0x29, 0x54, 0xb5, 0xca, 0xff, 0x56, 0x80, 0x55, 0xdd, + 0xd8, 0x4c, 0x9b, 0xe0, 0x15, 0x90, 0xa9, 0xfe, 0x4b, 0x0d, 0xa9, 0x0b, 0x98, 0x36, 0x73, 0x96, + 0x50, 0x9f, 0xc8, 0x72, 0x7d, 0x96, 0xf2, 0x87, 0xce, 0xa4, 0x7b, 0x9b, 0x6e, 0x0e, 0x8a, 0xfa, + 0xdd, 0xc8, 0x87, 0x1b, 0xca, 0x48, 0x7b, 0xb5, 0x3c, 0xa2, 0xff, 0x10, 0xff, 0x33, 0xe0, 0xf9, + 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x12, 0x09, 0x6b, 0x55, 0x20, 0x00, 0x00, +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/config/config.proto b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/config/config.proto new file mode 100644 index 00000000000..d40707a65a0 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/config/config.proto @@ -0,0 +1,691 @@ +syntax = "proto3"; + +// Protocol buffer for configuring testgrid.k8s.io + +import "pb/custom_evaluator/custom_evaluator.proto"; + +// Specifies the test name, and its source +message TestNameConfig { + // Specifies name elements to be selected from configuration values + message NameElement { + // A space-delimited string of labels + string labels = 1; + + // Configuration value to use. + // Valid choice are: + // 'Tests name': The name of a test case + // 'Commit': The commit number of the build + // 'Context', 'Thread': The info extracted from each junit files: + // - junit_core-os_01.xml -> Context: core-os, Thread: 01 + // - junit_runner.xml -> Context: runner + // - junit_01.xml -> Thread: 01 + // or any metadata key from finished.json, which is copied from your test suite. + // + // A valid sample TestNameConfig looks like: + // test_name_config: + // name_elements: + // - target_config: Tests name + // - target_config: Context + // name_format: '%s [%s]' + string target_config = 2; + // Whether to use the build-target name + bool build_target = 3; + // A space-delimited string of Bazel build tags. + string tags = 4; + // The key of a test result's property. + string test_property = 5; + } + + // The name elements specifying the target test name for this tab. + repeated NameElement name_elements = 1; + + // Specifies a printf-style format string for name elements. The format + // string should have as many conversions as there are name_elements. 
+ // For example, two name_elements could be used with name_format="%s: %s". + string name_format = 2; +} + +// A single notification. +message Notification { + // Required: Text summary of the issue or notice. + string summary = 1; + // Optional: Link to further information, such as a bug, email, document, etc. + string context_link = 2; +} + +// Specifies a group of tests to gather. +message TestGroup { + // Name of this TestGroup, for mapping dashboard tabs to tests. + string name = 1; + + // Path to the test result stored in gcs (some-bucket/some/optional/path). + string gcs_prefix = 2; + + // Number of days of test results to gather and serve. + int32 days_of_results = 3; + + // Whether to ignore pending (currently running) test results. + bool ignore_pending = 4; + + // Whether to ignore reported build results. It is recommended that tests + // report BUILD_FAIL instead of relying on this being disabled. + bool ignore_built = 5; + + enum TestsName { + TESTS_NAME_UNSPECIFIED = 0; + TESTS_NAME_IGNORE = 1; + TESTS_NAME_REPLACE = 2; + TESTS_NAME_APPEND = 3; + } + + // What to do with the 'Tests name' configuration value. It can replace the + // name of the test, be appended to the name of the test, or be ignored. If it is + // ignored, then the name of the tests will be the build target. + TestsName tests_name_policy = 6; + + reserved 7; // Unused gather_test_properties + + // Tests with names that include these substrings will be removed from the + // table. + repeated string ignore_test_substring = 8; + + // Custom column headers for defining extra column-heading rows from values in + // the test result. + message ColumnHeader { + string label = 1; + string property = 2; + string configuration_value = 3; + } + repeated ColumnHeader column_header = 9; + + enum FallbackGrouping { + FALLBACK_GROUPING_NONE = 0; + FALLBACK_GROUPING_DATE = 1; + FALLBACK_GROUPING_LABELS = 2; + FALLBACK_GROUPING_ID = 3; + FALLBACK_GROUPING_BUILD = 4; + + // When using this, ensure fallback_grouping_configuration_value is + // also set. + FALLBACK_GROUPING_CONFIGURATION_VALUE = 5; + } + + // A test grouping option used if not specified by primary_grouping (#29) + FallbackGrouping fallback_grouping = 10; + + // DEPRECATED: use DashboardTabAlertOptions > alert_stale_results_hours + int32 alert_stale_results_hours = 11 [deprecated = true]; + + // DEPRECATED: use DashboardTabAlertOptions > num_failures_to_alert + int32 num_failures_to_alert = 12 [deprecated = true]; + + // Whether to automatically file bugs, and what component to file them to. + // Requires further implementation of additional components. + int32 bug_component = 13; + + // Default code search path for searching regressions. Overridden by + // code_search_path in DashboardTab. + string code_search_path = 14; + + // The number of columns to consider "recent" for a variety of purposes. + int32 num_columns_recent = 15; + + // Whether to read test metadata from the test results. Information + // from the test metadata is used to determine where bugs are filed in + // specific cases.
+ bool use_test_metadata = 16; + + // DEPRECATED: use DashboardTabAlertOptions > alert_mail_to_addresses instead + string alert_mail_to_addresses = 17 [deprecated = true]; + + // DEPRECATED: use DashboardTabAlertOptions > subject + string alert_mail_subject = 18 [deprecated = true]; + + // DEPRECATED: use DashboardTabAlertOptions > alert_mail_failure_message + string alert_mail_failure_message = 19 [deprecated = true]; + + // DEPRECATED: use DashboardTabAlertOptions > debug_url + string alert_mail_debug_url = 20 [deprecated = true]; + + // DEPRECATED: use DashboardTabAlertOptions > wait_minutes_between_emails + int32 min_elapsed_minutes_between_mails = 21 [deprecated = true]; + + reserved 22; // No longer used + + // Whether to treat a combination of passes and failures within one test as a + // flaky status. + bool enable_flaky_status = 23; + + // disable_merged_status restores the deprecated behavior of + // splitting multiple foo rows into foo [2], etc., rather than a single + // potentially flaky row. + bool disable_merged_status = 60; + + // deprecated - always set to true + bool use_kubernetes_client = 24; + + // When use_kubernetes_client is on, testgrid expects these results + // to come from prow, which should include a prowjob.json and podinfo.json + // to help debugging. If you do not expect these files to exist, you + // can optionally disable this analysis. + bool disable_prowjob_analysis = 62; + + // deprecated - always set to true + bool is_external = 25; + + // Specifies the test name for a test. + TestNameConfig test_name_config = 26; + + // A list of notifications attached to this test group. + // This is displayed on any dashboard tab backed by this test group. + repeated Notification notifications = 27; + + reserved 28; // Unused externally (column_sort_by) + + enum PrimaryGrouping { + PRIMARY_GROUPING_NONE = 0; + PRIMARY_GROUPING_BUILD = 1; + } + + // A primary grouping strategy for grouping test results in columns. + // If a primary grouping is specified, the fallback grouping is ignored. + PrimaryGrouping primary_grouping = 29; + + // Whether to collect pass-fail data for test methods. Additional test cases + // will be added for each test method in a target. + bool enable_test_methods = 30; + + // Associates the presence of a named test property with a custom short text + // displayed over the results. Short text must be <=5 characters long. + message TestAnnotation { + string short_text = 1; + oneof short_text_message_source { + string property_name = 2; + } + } + + // Test annotations to look for. Adds custom short text overlays to results. + repeated TestAnnotation test_annotations = 31; + + // Maximum number of individual test methods to collect for any given test row. + // If a test has more than this many methods, no methods will be displayed. + int32 max_test_methods_per_test = 32; + + reserved 33; + + // Default metadata that should be applied for opening bugs, if a given regex + // matches against a test's name. + // Requires 'use_test_metadata = true'. + repeated TestMetadataOptions test_metadata_options = 34; + + // A space-delimited string of tags that are used to filter test targets. + // A leading - before the tag means this tag should not be present + // in the target. + // Example: + // contains tag1, but not tag2: test_tag_pattern = 'tag1 -tag2' + string test_tag_pattern = 35; + + // Options for auto-filed bugs, if enabled. + AutoBugOptions auto_bug_options = 36; + + // Max number of hours any single test can take.
+ int32 max_test_runtime_hours = 37; + + // The number of consecutive test passes to close the alert. + int32 num_passes_to_disable_alert = 38; + + // If true, also associate bugs with tests if the test result's overview/group + // ID is in the bug. + bool link_bugs_by_group = 39; + + reserved 40; + + // A string key/value pair message. + message KeyValue { + string key = 1; + string value = 2; + } + + // Only show test methods with all required properties + repeated KeyValue test_method_properties = 41; + + // If true, allows gathering and associating bugs with targets in the dashboard. + // Required in order to auto-file bugs. + bool gather_bugs = 42; + + // Numeric property metric value to be used for short text. If this property + // is present, it will override all the other short text values. + string short_text_metric = 43; + + reserved 44; + + // If true, only associate bugs with test methods if that test method is + // mentioned in the bug. If false, bugs will be associated with all test + // methods. + bool link_bugs_by_test_methods = 45; + + // Regex to match test methods. Only test methods with names that match + // this regex will be included in the table. + string test_method_match_regex = 46; + + // Regex to exclude test methods. Test methods with names that match + // this regex will be excluded from the table, even if they match + // test_method_match_regex. + string test_method_unmatch_regex = 61; + + // If true, test method names are printed with the full class names. + bool use_full_method_names = 47; + + reserved 48; + + // A configuration value that is used as a fallback grouping. + // This is useful for cases where there are builds that share the same + // commit but are run at separate times of day. + string fallback_grouping_configuration_value = 49; + + message ResultSource { + reserved 1, 3; // Legacy sources + + oneof result_source_config { + // JUnit results, parsed from GCS buckets. + JUnitConfig junit_config = 2; + } + + reserved 4; // Private source + } + + // Configuration type of the result source. + ResultSource result_source = 50; + + // Set of rules that are evaluated with each test result. If an evaluation is + // successful, the status of that test result will be whatever is specified + // for a given rule. For more information, look at the RuleSet documentation. + RuleSet custom_evaluator_rule_set = 51; + + // If true, instead of updating the group, read the state proto from storage + // and update summary, alerts, etc. from that state. + // This only applies to test group state, not bug state for a test group. + // This assumes that the state proto is updated through other means (another + // updater, manually, etc). + bool read_state_from_storage = 52; + + // If true, only add the most recent result for a test when multiple results + // for a test with the same name are encountered. + bool ignore_old_results = 53; + + // If True, ignore the 'pass with skips' status (show as a blank cell). + bool ignore_skip = 54; + + // A string containing python formatting specifiers that overrides the + // commit with the date formatted according to this string. This is useful + // for aggregating multiple columns that don't have a matching commit. + string build_override_strftime = 55; + + // Specify a property that will be read into state in the user_property field. + // These can be substituted into LinkTemplates.
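+ // For illustration (hypothetical property name): a result property such as + // "cluster" could be read into user_property and then referenced from a + // LinkTemplate when rendering per-cell links.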
+ string user_property = 56; + + reserved 58,59; + + // disable_prowjob_analysis 62 +} + +message JUnitConfig {} + +// Default metadata to apply when opening bugs. +message TestMetadataOptions { + // Apply the following metadata if this regex matches a test's name. + string test_name_regex = 1; + + // Default bug component to open a bug in. + int32 bug_component = 2; + + // Default owner to assign a bug to. + string owner = 3; + + // List of default users to CC a bug to. + repeated string cc = 4; + + // Apply the following metadata if this regex matches a test’s failure message. + string message_regex = 5; +} + +message AutoBugOptions { + // [BETA] When specified, file bugs to this component, using the beta AutoBug. + // If you do not want to opt into the beta, specify `bug_component` in your + // TestGroup instead. + // TODO(b/154866134): Rename to autobug_component once we've migrated. + int32 beta_autobug_component = 10; + + + // Whether to auto-close auto-filed bugs. + bool auto_close = 1; + + // A list of hotlist ids attached to auto-filed bugs. + repeated int64 hotlist_ids = 2; + + // Scale of issue priority, used to indicate importance of issue. + enum Priority { + // Unspecified; may not set priority at all + PRIORITY_UNSPECIFIED = 0; + // See https://developers.google.com/issue-tracker/concepts/issues + P0 = 1; + P1 = 2; + P2 = 3; + P3 = 4; + P4 = 5; + } + + // The priority of the auto-filed bug. If provided, this will overwrite the + // priority in the component default template. + Priority priority = 3; + + // A list of hotlist id sources + // Corresponds with the list hotlist_ids (#2) + repeated HotlistIdFromSource hotlist_ids_from_source = 4; + + // If True, files separate bugs for each failing target, instead of one bug + // for each set of targets failing at the same run. + bool file_individual = 5; + + // If True, keep only one automatic bug per target, regardless of the number of + // separate failures a target gets. This also requires `auto_close` and + // `file_individual` to be True. + // Consider setting `num_passes_to_disable_alert` instead if you're tracking + // flaky tests. + bool singleton_autobug = 6; + + // If provided: only raise one bug if the number of failures for a single + // query by testgrid for a single failure group exceeds this value. Requires + // 'file_individual' to be True. + int32 max_allowed_individual_bugs = 7; + + // If True, file issues for the 'Overall' target, even if otherwise invalid. + bool file_overall = 8; + + message DefaultTestMetadata { + int32 bug_component = 1; + string owner = 2; + string cc = 3; + } + + // If provided: supplements `max_allowed_individual_bugs` field to raise a + // single bug if the number of failures for a single query by testgrid exceeds + // the `max_allowed_individual_bugs` value, regardless of `TEST_METADATA` + // configurations. This is useful for filing fewer suspected environmental + // failure bugs and routing them to a specific location (e.g., an oncall). + // Requires 'file_individual' to be true and `max_allowed_individual_bugs` to + // not be empty. + DefaultTestMetadata default_test_metadata = 9; + + // [BETA] If True, query the test metadata API to get issue-routing metadata. + // Enables routing issues using structured test failures. + bool advanced_test_metadata = 11; +} + +message HotlistIdFromSource { + oneof hotlist_id_source { + // ID value of hotlists + int64 value = 1; + // A label prefix + string label = 2; + } +} + +// Specifies a dashboard.
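+// For illustration only (hypothetical names, not part of the upstream schema): +// a minimal Dashboard as it might appear in a textproto Configuration: +//   dashboards { +//     name: "serving" +//     dashboard_tab { +//       name: "continuous" +//       test_group_name: "serving-continuous" +//     } +//   }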
+message Dashboard { + // A list of the tabs on the dashboard. + repeated DashboardTab dashboard_tab = 1; + + // A name for the Dashboard. + string name = 2; + + // A list of notifications attached to this dashboard. + // This is displayed on any dashboard tab in this dashboard. + repeated Notification notifications = 3; + + reserved 4; // Deprecated show_summary_first bool, unused + + // Control which tab is displayed when first opening a dashboard. + // Defaults to Summary. + string default_tab = 5; + + // Controls whether to suppress highlighting of failing tabs. + bool downplay_failing_tabs = 8; + + // Deprecated: Inverse of 'downplay_failing_tabs' + bool highlight_failing_tabs = 6 [deprecated=true]; + + // Controls whether to apply special highlighting to result header columns for + // the current day. + bool highlight_today = 7; +} + +message LinkTemplate { + // The URL template. + string url = 1; + // The options templates. + repeated LinkOptionsTemplate options = 2; + // An optional name, used for the context menu. + string name = 3; +} + +// A simple key/value pair for link options. +message LinkOptionsTemplate { + // The key for the option. This is not expanded. + string key = 1; + + // The value for the option. This is expanded the same as the LinkTemplate. + string value = 2; +} + +// A single tab on a dashboard. +message DashboardTab { + // The name of the dashboard tab to display in the client. + string name = 1; + + // The name of the TestGroup specifying the test results for this tab. + string test_group_name = 2; + + // Default bug component for manually filing bugs from the dashboard. + int32 bug_component = 3; + + // Default code search path for searching regressions. This value overrides + // the default in the TestGroup config so that dashboards may be customized + // separately. + string code_search_path = 4; + + // See TestGroup.num_columns_recent. This value overrides the default in the + // TestGroup config so that dashboards may be customized separately. + int32 num_columns_recent = 5; + + // Base options to always include, for example: + // width=20&include-filter-by-regex=level_tests + // This is taken from the #fragment part of the testgrid url. + // Best way to create these is to set up the options on testgrid and then + // copy the #fragment part. + string base_options = 6; + + // The URL template to visit after clicking on a cell. + LinkTemplate open_test_template = 7; + + // The URL template to visit when filing a bug. + LinkTemplate file_bug_template = 8; + + // The URL template to visit when attaching a bug. + LinkTemplate attach_bug_template = 9; + + // Text to show in the about menu as a link to another view of the results. + string results_text = 10; + + // The URL template to visit after clicking. + LinkTemplate results_url_template = 11; + + // The URL template to visit when searching for code changes, such as pull requests. + LinkTemplate code_search_url_template = 12; + + // A description paragraph to be displayed. + string description = 13; + + // A regular expression that uses the named group syntax to specify how to + // show names in a table. + string tabular_names_regex = 14; + + // Configuration options for dashboard tab alerts. + DashboardTabAlertOptions alert_options = 15; + + // Configuration options for dashboard tab flakiness alerts.
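+ // For example (illustrative value): setting minimum_flakiness_to_alert to 30 + // in these options would raise an alert once measured flakiness reaches 30% + // or more.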
+  DashboardTabFlakinessAlertOptions flakiness_alert_options = 24;
+
+  // A URL for the "About this Dashboard" menu option.
+  string about_dashboard_url = 16;
+
+  // The URL template to visit when viewing an associated bug.
+  LinkTemplate open_bug_template = 17;
+
+  // If true, auto-file bugs when new alerts occur. This requires that the
+  // backing test group has `bug_component` set and uses the backing test
+  // group's `auto_bug_options`.
+  bool auto_file_bugs = 18;
+
+  // Display user local time on the dashboard when set to true (by default).
+  // If false, uses Pacific Timezone for this DashboardTab.
+  bool display_local_time = 19;
+
+  // A set of optional LinkTemplates that will become right-click context menu
+  // items.
+  // TODO(b/159042168) in the near future this should be re-implemented as a
+  // generic list of repeated LinkTemplates which users may specify in their
+  // respective configurations as right-click context menus with names and
+  // actions upon being clicked.
+  LinkTemplate context_menu_template = 20;
+
+  // When specified, treat a tab as BROKEN as long as one of the most recent
+  // columns is "broken" (the ratio of failed to total tests exceeds this
+  // threshold).
+  float broken_column_threshold = 21;
+
+  // Options for auto-filed bugs.
+  // Using this for a dashboard tab requires specifying `beta_autobug_component`
+  // and will opt you into the beta AutoBug.
+  AutoBugOptions beta_autobug_options = 22;
+
+  // Options for the configuration of the flakiness analysis tool, on a per-tab
+  // basis.
+  HealthAnalysisOptions health_analysis_options = 23;
+}
+
+// Configuration options for dashboard tab alerts.
+message DashboardTabAlertOptions {
+  // Time in hours before an alert will be added to a test results table if the
+  // run date of the latest results is older than this time. If zero, no
+  // alerts are raised.
+  int32 alert_stale_results_hours = 1;
+
+  // The number of consecutive test result failures to see before alerting of
+  // a consistent failure. If zero, no alerts are raised.
+  int32 num_failures_to_alert = 2;
+
+  // The comma-separated addresses to send mail.
+  string alert_mail_to_addresses = 3;
+
+  // The number of consecutive test passes to close the alert.
+  int32 num_passes_to_disable_alert = 4;
+
+  // Custom subject for alert mails.
+  string subject = 5;
+
+  // Custom link for further help/instructions on debugging this alert.
+  string debug_url = 6;
+
+  // Custom text to show for the debug link.
+  string debug_message = 7;
+
+  // Wait time between emails, in minutes. If unset or zero, an email will be
+  // sent only once it becomes a consistent failure, and not again until it
+  // succeeds. TestGrid does not pester about staleness.
+  int32 wait_minutes_between_emails = 8;
+
+  // A custom message.
+  string alert_mail_failure_message = 9;
+}
+
+// Configuration options for dashboard tab flakiness alerts.
+message DashboardTabFlakinessAlertOptions {
+  // The minimum amount of flakiness needed to trigger a flakiness alert.
+  // 0 = disable alerts.
+  // This is a percentage; expected values go from 0 to 100 (100 = 100% flaky).
+  float minimum_flakiness_to_alert = 1;
+
+  // The comma-separated addresses to send mail.
+  string alert_mail_to_addresses = 2;
+
+  // Custom subject for alert mails.
+  string subject = 3;
+
+  // Minimum time between sending mails, in minutes.
+  int32 wait_minutes_between_emails = 4;
+
+  // A custom message.
+  // TODO(RonWeber): This should be a template
+  string alert_mail_failure_message = 5;
+}
+
+// Specifies a dashboard group.
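+//
+// For illustration only (editorial sketch): with the generated Go bindings, a
+// group linking two dashboards could look like the following; the package
+// alias `configpb` and the names are hypothetical.
+//
+//   g := &configpb.DashboardGroup{
+//       Name:           "knative",
+//       DashboardNames: []string{"serving", "eventing"},
+//   }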
+message DashboardGroup {
+  // The name for the dashboard group.
+  string name = 1;
+
+  // A list of names specifying dashboards to show links to in a separate tabbed
+  // bar at the top of the page for each of the given dashboards.
+  repeated string dashboard_names = 2;
+}
+
+// A service configuration consisting of multiple test groups and dashboards.
+message Configuration {
+  // A list of groups of tests to gather.
+  repeated TestGroup test_groups = 1;
+
+  // A list of all of the dashboards for a server.
+  repeated Dashboard dashboards = 2;
+
+  // A list of all the dashboard groups for a server.
+  repeated DashboardGroup dashboard_groups = 3;
+}
+
+// A grouping of configuration options for the flakiness analysis tool.
+// Later configuration options could include the ability to choose different
+// kinds of flakiness and to choose whether, and to whom, to email a copy of
+// the flakiness report.
+message HealthAnalysisOptions {
+  // Defaults to false; flakiness analysis is opt-in.
+  bool enable = 1;
+
+  // Defines the number of days for one interval of analysis.
+  // i.e. flakiness will be analyzed for the previous N days starting from Now,
+  // and it will be compared to the calculated N days before that for trend analysis.
+  int32 days_of_analysis = 2;
+
+  // When to send healthiness emails out, uses cron string format.
+  string email_schedule = 3;
+
+  // A comma-separated list of healthiness email recipients.
+  string email_recipients = 4;
+
+  // A compilable regex string for grouping tests by name.
+  // Works the same as the group-by-regex-mask option of base_options:
+  // go/testgrid/users/dashboard_guide#grouping-tests
+  // An empty string means no grouping.
+  // e.g. test name: "//path/to/test - env", regex: ` - \w+`
+  // The regex will match " - env" in the above test name and give a group of:
+  //   //path/to/test  <- Group Name
+  //   - env           <- Group Member
+  string grouping_regex = 5;
+}
+
+// The DefaultConfiguration proto is deprecated, and will be deleted after Nov 1, 2019.
+// For defaulting behavior, use the yamlcfg library instead.
+message DefaultConfiguration {
+  // A default test group with default initialization data.
+  TestGroup default_test_group = 1 [deprecated=true];
+
+  // A default dashboard tab with default initialization data.
+  DashboardTab default_dashboard_tab = 2 [deprecated=true];
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator/custom_evaluator.pb.go b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator/custom_evaluator.pb.go
new file mode 100644
index 00000000000..82cb31ddd66
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator/custom_evaluator.pb.go
@@ -0,0 +1,433 @@
+/*
+Copyright The TestGrid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: custom_evaluator.proto + +package custom_evaluator + +import ( + fmt "fmt" + test_status "github.com/GoogleCloudPlatform/testgrid/pb/test_status" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Comparison_Operator int32 + +const ( + // Unknown. May assume OP_EQ for legacy purposes, but should warn. + Comparison_OP_UNKNOWN Comparison_Operator = 0 + // Equals operator. + Comparison_OP_EQ Comparison_Operator = 1 + // Not equals operator. + Comparison_OP_NE Comparison_Operator = 2 + // Comparison value less than TestResult's value + Comparison_OP_LT Comparison_Operator = 3 + // Comparison value less than or equal TestResult's value + Comparison_OP_LE Comparison_Operator = 4 + // Comparison value greater than TestResult's value + Comparison_OP_GT Comparison_Operator = 5 + // Comparison value greater than or equal TestResult's value + Comparison_OP_GE Comparison_Operator = 6 + // Regex match of Comparison.value string with the TestResult's evaluation + // value string. + Comparison_OP_REGEX Comparison_Operator = 7 + // Checks to see if the evaluation value string starts with the + // Comparison.value string + Comparison_OP_STARTS_WITH Comparison_Operator = 8 + // Checks to see if the evaluation value string is contained within the + // Comparison.value string + Comparison_OP_CONTAINS Comparison_Operator = 9 +) + +var Comparison_Operator_name = map[int32]string{ + 0: "OP_UNKNOWN", + 1: "OP_EQ", + 2: "OP_NE", + 3: "OP_LT", + 4: "OP_LE", + 5: "OP_GT", + 6: "OP_GE", + 7: "OP_REGEX", + 8: "OP_STARTS_WITH", + 9: "OP_CONTAINS", +} + +var Comparison_Operator_value = map[string]int32{ + "OP_UNKNOWN": 0, + "OP_EQ": 1, + "OP_NE": 2, + "OP_LT": 3, + "OP_LE": 4, + "OP_GT": 5, + "OP_GE": 6, + "OP_REGEX": 7, + "OP_STARTS_WITH": 8, + "OP_CONTAINS": 9, +} + +func (x Comparison_Operator) String() string { + return proto.EnumName(Comparison_Operator_name, int32(x)) +} + +func (Comparison_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_14164f833d03200a, []int{3, 0} +} + +// A collection of Rule objects. Used to define many rules. 
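+// For illustration only (editorial sketch, not generated code): a RuleSet
+// that reclassifies results whose first error field matches a timeout regex.
+// The field name "exception_text" and the regex are hypothetical; every
+// comparison in a Rule must succeed for the Rule to apply.
+//
+//	rs := &RuleSet{Rules: []*Rule{{
+//		TestResultComparisons: []*TestResultComparison{{
+//			Comparison: &Comparison{
+//				Op:              Comparison_OP_REGEX,
+//				ComparisonValue: &Comparison_StringValue{StringValue: ".*timeout.*"},
+//			},
+//			TestResultInfo: &TestResultComparison_TestResultErrorField{
+//				TestResultErrorField: "exception_text",
+//			},
+//		}},
+//		ComputedStatus: test_status.TestStatus_NO_RESULT,
+//	}}}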
+type RuleSet struct { + Rules []*Rule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RuleSet) Reset() { *m = RuleSet{} } +func (m *RuleSet) String() string { return proto.CompactTextString(m) } +func (*RuleSet) ProtoMessage() {} +func (*RuleSet) Descriptor() ([]byte, []int) { + return fileDescriptor_14164f833d03200a, []int{0} +} + +func (m *RuleSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RuleSet.Unmarshal(m, b) +} +func (m *RuleSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RuleSet.Marshal(b, m, deterministic) +} +func (m *RuleSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleSet.Merge(m, src) +} +func (m *RuleSet) XXX_Size() int { + return xxx_messageInfo_RuleSet.Size(m) +} +func (m *RuleSet) XXX_DiscardUnknown() { + xxx_messageInfo_RuleSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleSet proto.InternalMessageInfo + +func (m *RuleSet) GetRules() []*Rule { + if m != nil { + return m.Rules + } + return nil +} + +// A single rule that describes how to evaluate a test_cases_pb2.TestResult +type Rule struct { + // Multiple comparisons to run against a result. EVERY TestResultComparison + // has to succeed for this Rule to succeed. + TestResultComparisons []*TestResultComparison `protobuf:"bytes,1,rep,name=test_result_comparisons,json=testResultComparisons,proto3" json:"test_result_comparisons,omitempty"` + // Required: The TestStatus to return if the comparison succeeds. + ComputedStatus test_status.TestStatus `protobuf:"varint,3,opt,name=computed_status,json=computedStatus,proto3,enum=TestStatus" json:"computed_status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Rule) Reset() { *m = Rule{} } +func (m *Rule) String() string { return proto.CompactTextString(m) } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_14164f833d03200a, []int{1} +} + +func (m *Rule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Rule.Unmarshal(m, b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return xxx_messageInfo_Rule.Size(m) +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *Rule) GetTestResultComparisons() []*TestResultComparison { + if m != nil { + return m.TestResultComparisons + } + return nil +} + +func (m *Rule) GetComputedStatus() test_status.TestStatus { + if m != nil { + return m.ComputedStatus + } + return test_status.TestStatus_NO_RESULT +} + +// Describes how to get information the TestResult proto and how to compare the +// value against the comparison value. 
+type TestResultComparison struct { + // Required: This is the comparison that will be used as + Comparison *Comparison `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"` + // Types that are valid to be assigned to TestResultInfo: + // *TestResultComparison_PropertyKey + // *TestResultComparison_TestResultField + // *TestResultComparison_TestResultErrorField + TestResultInfo isTestResultComparison_TestResultInfo `protobuf_oneof:"test_result_info"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestResultComparison) Reset() { *m = TestResultComparison{} } +func (m *TestResultComparison) String() string { return proto.CompactTextString(m) } +func (*TestResultComparison) ProtoMessage() {} +func (*TestResultComparison) Descriptor() ([]byte, []int) { + return fileDescriptor_14164f833d03200a, []int{2} +} + +func (m *TestResultComparison) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestResultComparison.Unmarshal(m, b) +} +func (m *TestResultComparison) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestResultComparison.Marshal(b, m, deterministic) +} +func (m *TestResultComparison) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestResultComparison.Merge(m, src) +} +func (m *TestResultComparison) XXX_Size() int { + return xxx_messageInfo_TestResultComparison.Size(m) +} +func (m *TestResultComparison) XXX_DiscardUnknown() { + xxx_messageInfo_TestResultComparison.DiscardUnknown(m) +} + +var xxx_messageInfo_TestResultComparison proto.InternalMessageInfo + +func (m *TestResultComparison) GetComparison() *Comparison { + if m != nil { + return m.Comparison + } + return nil +} + +type isTestResultComparison_TestResultInfo interface { + isTestResultComparison_TestResultInfo() +} + +type TestResultComparison_PropertyKey struct { + PropertyKey string `protobuf:"bytes,2,opt,name=property_key,json=propertyKey,proto3,oneof"` +} + +type TestResultComparison_TestResultField struct { + TestResultField string `protobuf:"bytes,3,opt,name=test_result_field,json=testResultField,proto3,oneof"` +} + +type TestResultComparison_TestResultErrorField struct { + TestResultErrorField string `protobuf:"bytes,4,opt,name=test_result_error_field,json=testResultErrorField,proto3,oneof"` +} + +func (*TestResultComparison_PropertyKey) isTestResultComparison_TestResultInfo() {} + +func (*TestResultComparison_TestResultField) isTestResultComparison_TestResultInfo() {} + +func (*TestResultComparison_TestResultErrorField) isTestResultComparison_TestResultInfo() {} + +func (m *TestResultComparison) GetTestResultInfo() isTestResultComparison_TestResultInfo { + if m != nil { + return m.TestResultInfo + } + return nil +} + +func (m *TestResultComparison) GetPropertyKey() string { + if x, ok := m.GetTestResultInfo().(*TestResultComparison_PropertyKey); ok { + return x.PropertyKey + } + return "" +} + +func (m *TestResultComparison) GetTestResultField() string { + if x, ok := m.GetTestResultInfo().(*TestResultComparison_TestResultField); ok { + return x.TestResultField + } + return "" +} + +func (m *TestResultComparison) GetTestResultErrorField() string { + if x, ok := m.GetTestResultInfo().(*TestResultComparison_TestResultErrorField); ok { + return x.TestResultErrorField + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*TestResultComparison) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TestResultComparison_PropertyKey)(nil), + (*TestResultComparison_TestResultField)(nil), + (*TestResultComparison_TestResultErrorField)(nil), + } +} + +// The method of comparison used for evaluation. Describes how to compare two +// values. +type Comparison struct { + // Required: Defines how to compare two attributes. + // When the TestResult value is numerical, numerical_value will be used to + // compare. When the TestResult value is a string, string_value will be used. + Op Comparison_Operator `protobuf:"varint,1,opt,name=op,proto3,enum=Comparison_Operator" json:"op,omitempty"` + // Types that are valid to be assigned to ComparisonValue: + // *Comparison_StringValue + // *Comparison_NumericalValue + ComparisonValue isComparison_ComparisonValue `protobuf_oneof:"comparison_value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Comparison) Reset() { *m = Comparison{} } +func (m *Comparison) String() string { return proto.CompactTextString(m) } +func (*Comparison) ProtoMessage() {} +func (*Comparison) Descriptor() ([]byte, []int) { + return fileDescriptor_14164f833d03200a, []int{3} +} + +func (m *Comparison) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Comparison.Unmarshal(m, b) +} +func (m *Comparison) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Comparison.Marshal(b, m, deterministic) +} +func (m *Comparison) XXX_Merge(src proto.Message) { + xxx_messageInfo_Comparison.Merge(m, src) +} +func (m *Comparison) XXX_Size() int { + return xxx_messageInfo_Comparison.Size(m) +} +func (m *Comparison) XXX_DiscardUnknown() { + xxx_messageInfo_Comparison.DiscardUnknown(m) +} + +var xxx_messageInfo_Comparison proto.InternalMessageInfo + +func (m *Comparison) GetOp() Comparison_Operator { + if m != nil { + return m.Op + } + return Comparison_OP_UNKNOWN +} + +type isComparison_ComparisonValue interface { + isComparison_ComparisonValue() +} + +type Comparison_StringValue struct { + StringValue string `protobuf:"bytes,2,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Comparison_NumericalValue struct { + NumericalValue float64 `protobuf:"fixed64,3,opt,name=numerical_value,json=numericalValue,proto3,oneof"` +} + +func (*Comparison_StringValue) isComparison_ComparisonValue() {} + +func (*Comparison_NumericalValue) isComparison_ComparisonValue() {} + +func (m *Comparison) GetComparisonValue() isComparison_ComparisonValue { + if m != nil { + return m.ComparisonValue + } + return nil +} + +func (m *Comparison) GetStringValue() string { + if x, ok := m.GetComparisonValue().(*Comparison_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Comparison) GetNumericalValue() float64 { + if x, ok := m.GetComparisonValue().(*Comparison_NumericalValue); ok { + return x.NumericalValue + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Comparison) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Comparison_StringValue)(nil), + (*Comparison_NumericalValue)(nil), + } +} + +func init() { + proto.RegisterEnum("Comparison_Operator", Comparison_Operator_name, Comparison_Operator_value) + proto.RegisterType((*RuleSet)(nil), "RuleSet") + proto.RegisterType((*Rule)(nil), "Rule") + proto.RegisterType((*TestResultComparison)(nil), "TestResultComparison") + proto.RegisterType((*Comparison)(nil), "Comparison") +} + +func init() { proto.RegisterFile("custom_evaluator.proto", fileDescriptor_14164f833d03200a) } + +var fileDescriptor_14164f833d03200a = []byte{ + // 461 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xdd, 0x6e, 0xd3, 0x30, + 0x18, 0x9d, 0xfb, 0xb3, 0xb5, 0x5f, 0xa6, 0xd4, 0x58, 0x1d, 0x54, 0x70, 0x13, 0x05, 0x84, 0x8a, + 0x40, 0x41, 0x2a, 0x48, 0x5c, 0x8f, 0x29, 0xac, 0xd3, 0x20, 0x09, 0x4e, 0x60, 0xdc, 0x59, 0x59, + 0xe7, 0xa1, 0x88, 0xb4, 0x8e, 0x6c, 0x07, 0xa9, 0xcf, 0xc0, 0x05, 0xaf, 0xc1, 0x7b, 0xf1, 0x22, + 0xc8, 0xf9, 0x59, 0x82, 0xb4, 0xbb, 0xe3, 0x73, 0xbe, 0x63, 0x9f, 0x63, 0x1b, 0x1e, 0x6e, 0x4a, + 0xa5, 0xc5, 0x96, 0xf1, 0x9f, 0x69, 0x5e, 0xa6, 0x5a, 0x48, 0xaf, 0x90, 0x42, 0x8b, 0xc7, 0x4e, + 0x71, 0xfd, 0x5a, 0x73, 0xa5, 0x99, 0xd2, 0xa9, 0x2e, 0x55, 0x1f, 0xd7, 0x13, 0xee, 0x73, 0x38, + 0xa2, 0x65, 0xce, 0x63, 0xae, 0xc9, 0x13, 0x18, 0xcb, 0x32, 0xe7, 0x6a, 0x81, 0x9c, 0xe1, 0xd2, + 0x5a, 0x8d, 0x3d, 0x23, 0xd0, 0x9a, 0x73, 0x7f, 0x21, 0x18, 0x99, 0x35, 0xf9, 0x04, 0x8f, 0xaa, + 0x5d, 0x24, 0x57, 0x65, 0xae, 0xd9, 0x46, 0x6c, 0x8b, 0x54, 0x66, 0x4a, 0xec, 0x5a, 0xdf, 0x89, + 0x97, 0x70, 0xa5, 0x69, 0x25, 0x9f, 0xdd, 0xa9, 0xf4, 0x44, 0xdf, 0xc3, 0x2a, 0xf2, 0x16, 0x66, + 0x66, 0x8b, 0x52, 0xf3, 0x9b, 0x26, 0xd8, 0x62, 0xe8, 0xa0, 0xa5, 0xbd, 0xb2, 0xaa, 0x6d, 0xe2, + 0x8a, 0xa2, 0x76, 0x3b, 0x53, 0xaf, 0xdd, 0xbf, 0x08, 0xe6, 0xf7, 0x9d, 0x42, 0x5e, 0x02, 0x74, + 0x89, 0x16, 0xc8, 0x41, 0x4b, 0x6b, 0x65, 0x79, 0xbd, 0x18, 0x3d, 0x99, 0x3c, 0x85, 0xe3, 0x42, + 0x8a, 0x82, 0x4b, 0xbd, 0x67, 0x3f, 0xf8, 0x7e, 0x31, 0x70, 0xd0, 0x72, 0xba, 0x3e, 0xa0, 0x56, + 0xcb, 0x5e, 0xf2, 0x3d, 0x79, 0x05, 0x0f, 0xfa, 0x7d, 0x6f, 0x33, 0x9e, 0xdf, 0x54, 0x11, 0xcd, + 0xe4, 0xac, 0x2b, 0xf5, 0xc1, 0x08, 0xe4, 0xdd, 0xff, 0xb7, 0xc3, 0xa5, 0x14, 0xb2, 0xf1, 0x8c, + 0x1a, 0xcf, 0xbc, 0xf3, 0xf8, 0x46, 0xae, 0x8c, 0xef, 0x09, 0xe0, 0xbe, 0x31, 0xdb, 0xdd, 0x0a, + 0xf7, 0xcf, 0x00, 0xa0, 0xd7, 0xed, 0x19, 0x0c, 0x44, 0x51, 0x75, 0xb2, 0x57, 0xf3, 0x5e, 0x27, + 0x2f, 0x2c, 0xb8, 0x34, 0x8f, 0x4e, 0x07, 0xa2, 0x30, 0xa5, 0x94, 0x96, 0xd9, 0xee, 0x3b, 0x33, + 0x7f, 0x81, 0x77, 0xa5, 0x6a, 0xf6, 0xab, 0x21, 0xc9, 0x0b, 0x98, 0xed, 0xca, 0x2d, 0x97, 0xd9, + 0x26, 0xcd, 0x9b, 0x39, 0x53, 0x09, 0xad, 0x0f, 0xa8, 0x7d, 0x27, 0x54, 0xa3, 0xee, 0x6f, 0x04, + 0x93, 0xf6, 0x00, 0x62, 0x03, 0x84, 0x11, 0xfb, 0x12, 0x5c, 0x06, 0xe1, 0x55, 0x80, 0x0f, 0xc8, + 0x14, 0xc6, 0x61, 0xc4, 0xfc, 0xcf, 0x18, 0x35, 0x30, 0xf0, 0xf1, 0xa0, 0x81, 0x1f, 0x13, 0x3c, + 0x6c, 0xa1, 0x8f, 0x47, 0x0d, 0x3c, 0x4f, 0xf0, 0xb8, 0x85, 0x3e, 0x3e, 0x24, 0xc7, 0x30, 0x09, + 0x23, 0x46, 0xfd, 0x73, 0xff, 0x1b, 0x3e, 0x22, 0x04, 0xec, 0x30, 0x62, 0x71, 0x72, 0x4a, 0x93, + 0x98, 0x5d, 0x5d, 0x24, 0x6b, 0x3c, 0x21, 0x33, 0xb0, 0xc2, 0x88, 0x9d, 0x85, 0x41, 0x72, 0x7a, + 0x11, 0xc4, 0x78, 0x6a, 0xae, 0xaa, 0x7b, 0xc4, 0x3a, 0xfd, 0xf5, 0x61, 0xf5, 0x9b, 0xdf, 0xfc, + 0x0b, 0x00, 0x00, 0xff, 0xff, 0x06, 0x22, 0x01, 0xaf, 0x09, 0x03, 0x00, 0x00, +} diff --git 
a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator/custom_evaluator.proto b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator/custom_evaluator.proto
new file mode 100644
index 00000000000..eaff05bd84f
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/custom_evaluator/custom_evaluator.proto
@@ -0,0 +1,97 @@
+syntax = "proto3";
+
+import "pb/test_status/test_status.proto";
+
+// A configuration sub-object used to do custom evaluation of test results.
+
+// A collection of Rule objects. Used to define many rules.
+message RuleSet {
+  repeated Rule rules = 1;
+}
+
+// A single rule that describes how to evaluate a test_cases_pb2.TestResult.
+message Rule {
+  // Multiple comparisons to run against a result. EVERY TestResultComparison
+  // has to succeed for this Rule to succeed.
+  repeated TestResultComparison test_result_comparisons = 1;
+
+  // Required: The TestStatus to return if the comparison succeeds.
+  TestStatus computed_status = 3;
+}
+
+// Describes how to get information from the TestResult proto and how to
+// compare the value against the comparison value.
+message TestResultComparison {
+  // Required: The comparison that will be used to evaluate the selected value.
+  Comparison comparison = 1;
+
+  oneof test_result_info {
+    // The name of the property to evaluate.
+    // Properties are usually strings, so a string comparison is assumed and required.
+    string property_key = 2;
+
+    // This will find the scalar field with the given name within the TestResult
+    // proto. The value of that field will be used to evaluate.
+    //
+    // NOTE: Only supported for string and numerical values.
+    string test_result_field = 3;
+
+    // This will find the field nested within the first error of the TestResult
+    // proto. The value of that field will be used to evaluate.
+    //
+    // NOTE: Only supported for string and numerical values.
+    string test_result_error_field = 4;
+  }
+}
+
+// The method of comparison used for evaluation. Describes how to compare two
+// values.
+message Comparison {
+  enum Operator {
+    // Unknown. May assume OP_EQ for legacy purposes, but should warn.
+    OP_UNKNOWN = 0;
+
+    // Equals operator.
+    OP_EQ = 1;
+
+    // Not equals operator.
+    OP_NE = 2;
+
+    // Comparison value less than TestResult's value
+    OP_LT = 3;
+
+    // Comparison value less than or equal TestResult's value
+    OP_LE = 4;
+
+    // Comparison value greater than TestResult's value
+    OP_GT = 5;
+
+    // Comparison value greater than or equal TestResult's value
+    OP_GE = 6;
+
+    // Regex match of Comparison.value string with the TestResult's evaluation
+    // value string.
+    OP_REGEX = 7;
+
+    // Checks to see if the evaluation value string starts with the
+    // Comparison.value string
+    OP_STARTS_WITH = 8;
+
+    // Checks to see if the evaluation value string is contained within the
+    // Comparison.value string
+    OP_CONTAINS = 9;
+  }
+
+  // Required: Defines how to compare two attributes.
+  // When the TestResult value is numerical, numerical_value will be used to
+  // compare. When the TestResult value is a string, string_value will be used.
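+  // For illustration only (hypothetical values): op = OP_STARTS_WITH with
+  // string_value = "FAIL" matches an evaluation value of "FAILED: setup",
+  // because that value starts with the Comparison value.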
+  Operator op = 1;
+
+  oneof comparison_value {
+    // For operations EQ, NE, REGEX, STARTS_WITH, CONTAINS
+    string string_value = 2;
+
+    // For operations EQ, NE, LT, LE, GT, GE
+    double numerical_value = 3;
+  }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/state/state.pb.go b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/state/state.pb.go
new file mode 100644
index 00000000000..92e8535dc58
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/state/state.pb.go
@@ -0,0 +1,957 @@
+/*
+Copyright The TestGrid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: state.proto
+
+package state
+
+import (
+	fmt "fmt"
+	config "github.com/GoogleCloudPlatform/testgrid/pb/config"
+	proto "github.com/golang/protobuf/proto"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A metric and its values for each test cycle.
+type Metric struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Sparse encoding of values. Indices is a list of pairs of <index, count>
+	// that details columns with metric values. So given:
+	//   Indices: [0, 2, 6, 4]
+	//   Values: [0.1,0.2,6.1,6.2,6.3,6.4]
+	// Decoded 12-value equivalent is:
+	//   [0.1, 0.2, nil, nil, nil, nil, 6.1, 6.2, 6.3, 6.4, nil, nil, ...]
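+	//
+	// For illustration only (editorial sketch, not generated code), a decoder
+	// for this sparse encoding could look like the following; it assumes the
+	// encoded pairs fit within the n columns of the grid:
+	//
+	//	func decodeMetric(indices []int32, values []float64, n int) []*float64 {
+	//		out := make([]*float64, n) // nil means "no value" for that column
+	//		vi := 0
+	//		for i := 0; i+1 < len(indices); i += 2 {
+	//			start, count := int(indices[i]), int(indices[i+1])
+	//			for j := 0; j < count && vi < len(values) && start+j < n; j++ {
+	//				v := values[vi]
+	//				out[start+j] = &v
+	//				vi++
+	//			}
+	//		}
+	//		return out
+	//	}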
+ Indices []int32 `protobuf:"varint,2,rep,packed,name=indices,proto3" json:"indices,omitempty"` + Values []float64 `protobuf:"fixed64,3,rep,packed,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{0} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Metric) GetIndices() []int32 { + if m != nil { + return m.Indices + } + return nil +} + +func (m *Metric) GetValues() []float64 { + if m != nil { + return m.Values + } + return nil +} + +type UpdatePhaseData struct { + // The name for a part of the update cycle. + PhaseName string `protobuf:"bytes,1,opt,name=phase_name,json=phaseName,proto3" json:"phase_name,omitempty"` + // Time taken for a part of the update cycle, in seconds. + PhaseSeconds float64 `protobuf:"fixed64,2,opt,name=phase_seconds,json=phaseSeconds,proto3" json:"phase_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdatePhaseData) Reset() { *m = UpdatePhaseData{} } +func (m *UpdatePhaseData) String() string { return proto.CompactTextString(m) } +func (*UpdatePhaseData) ProtoMessage() {} +func (*UpdatePhaseData) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{1} +} + +func (m *UpdatePhaseData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdatePhaseData.Unmarshal(m, b) +} +func (m *UpdatePhaseData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdatePhaseData.Marshal(b, m, deterministic) +} +func (m *UpdatePhaseData) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdatePhaseData.Merge(m, src) +} +func (m *UpdatePhaseData) XXX_Size() int { + return xxx_messageInfo_UpdatePhaseData.Size(m) +} +func (m *UpdatePhaseData) XXX_DiscardUnknown() { + xxx_messageInfo_UpdatePhaseData.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdatePhaseData proto.InternalMessageInfo + +func (m *UpdatePhaseData) GetPhaseName() string { + if m != nil { + return m.PhaseName + } + return "" +} + +func (m *UpdatePhaseData) GetPhaseSeconds() float64 { + if m != nil { + return m.PhaseSeconds + } + return 0 +} + +// Info on time taken to update test results during the last update cycle. +type UpdateInfo struct { + // Metrics for how long parts of the update cycle take. 
+ UpdatePhaseData []*UpdatePhaseData `protobuf:"bytes,1,rep,name=update_phase_data,json=updatePhaseData,proto3" json:"update_phase_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateInfo) Reset() { *m = UpdateInfo{} } +func (m *UpdateInfo) String() string { return proto.CompactTextString(m) } +func (*UpdateInfo) ProtoMessage() {} +func (*UpdateInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{2} +} + +func (m *UpdateInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateInfo.Unmarshal(m, b) +} +func (m *UpdateInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateInfo.Marshal(b, m, deterministic) +} +func (m *UpdateInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateInfo.Merge(m, src) +} +func (m *UpdateInfo) XXX_Size() int { + return xxx_messageInfo_UpdateInfo.Size(m) +} +func (m *UpdateInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateInfo proto.InternalMessageInfo + +func (m *UpdateInfo) GetUpdatePhaseData() []*UpdatePhaseData { + if m != nil { + return m.UpdatePhaseData + } + return nil +} + +// Info on a failing test row about the failure. +type AlertInfo struct { + // Number of results that have failed. + FailCount int32 `protobuf:"varint,1,opt,name=fail_count,json=failCount,proto3" json:"fail_count,omitempty"` + // The build ID the test first failed at. + FailBuildId string `protobuf:"bytes,2,opt,name=fail_build_id,json=failBuildId,proto3" json:"fail_build_id,omitempty"` + // The time the test first failed at. + FailTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=fail_time,json=failTime,proto3" json:"fail_time,omitempty"` + // The test ID for the first test failure. + FailTestId string `protobuf:"bytes,4,opt,name=fail_test_id,json=failTestId,proto3" json:"fail_test_id,omitempty"` + // The build ID the test last passed at. + PassBuildId string `protobuf:"bytes,5,opt,name=pass_build_id,json=passBuildId,proto3" json:"pass_build_id,omitempty"` + // The time the test last passed at. + PassTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=pass_time,json=passTime,proto3" json:"pass_time,omitempty"` + // A snippet explaining the failure. + FailureMessage string `protobuf:"bytes,7,opt,name=failure_message,json=failureMessage,proto3" json:"failure_message,omitempty"` + // Link to search for build changes, internally a code-search link. + BuildLink string `protobuf:"bytes,8,opt,name=build_link,json=buildLink,proto3" json:"build_link,omitempty"` + // Text for option to search for build changes. + BuildLinkText string `protobuf:"bytes,9,opt,name=build_link_text,json=buildLinkText,proto3" json:"build_link_text,omitempty"` + // Text to display for link to search for build changes. + BuildUrlText string `protobuf:"bytes,10,opt,name=build_url_text,json=buildUrlText,proto3" json:"build_url_text,omitempty"` + // The build ID for the latest test failure. (Does not indicate the failure is + // 'over', just the latest test failure we found.) + LatestFailBuildId string `protobuf:"bytes,11,opt,name=latest_fail_build_id,json=latestFailBuildId,proto3" json:"latest_fail_build_id,omitempty"` + // The test ID for the latest test failure. 
+ LatestFailTestId string `protobuf:"bytes,14,opt,name=latest_fail_test_id,json=latestFailTestId,proto3" json:"latest_fail_test_id,omitempty"` + // Maps (property name):(property value) for arbitrary alert properties. + Properties map[string]string `protobuf:"bytes,12,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of IDs for issue hotlists related to this failure. + HotlistIds []string `protobuf:"bytes,13,rep,name=hotlist_ids,json=hotlistIds,proto3" json:"hotlist_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertInfo) Reset() { *m = AlertInfo{} } +func (m *AlertInfo) String() string { return proto.CompactTextString(m) } +func (*AlertInfo) ProtoMessage() {} +func (*AlertInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{3} +} + +func (m *AlertInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertInfo.Unmarshal(m, b) +} +func (m *AlertInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertInfo.Marshal(b, m, deterministic) +} +func (m *AlertInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertInfo.Merge(m, src) +} +func (m *AlertInfo) XXX_Size() int { + return xxx_messageInfo_AlertInfo.Size(m) +} +func (m *AlertInfo) XXX_DiscardUnknown() { + xxx_messageInfo_AlertInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertInfo proto.InternalMessageInfo + +func (m *AlertInfo) GetFailCount() int32 { + if m != nil { + return m.FailCount + } + return 0 +} + +func (m *AlertInfo) GetFailBuildId() string { + if m != nil { + return m.FailBuildId + } + return "" +} + +func (m *AlertInfo) GetFailTime() *timestamp.Timestamp { + if m != nil { + return m.FailTime + } + return nil +} + +func (m *AlertInfo) GetFailTestId() string { + if m != nil { + return m.FailTestId + } + return "" +} + +func (m *AlertInfo) GetPassBuildId() string { + if m != nil { + return m.PassBuildId + } + return "" +} + +func (m *AlertInfo) GetPassTime() *timestamp.Timestamp { + if m != nil { + return m.PassTime + } + return nil +} + +func (m *AlertInfo) GetFailureMessage() string { + if m != nil { + return m.FailureMessage + } + return "" +} + +func (m *AlertInfo) GetBuildLink() string { + if m != nil { + return m.BuildLink + } + return "" +} + +func (m *AlertInfo) GetBuildLinkText() string { + if m != nil { + return m.BuildLinkText + } + return "" +} + +func (m *AlertInfo) GetBuildUrlText() string { + if m != nil { + return m.BuildUrlText + } + return "" +} + +func (m *AlertInfo) GetLatestFailBuildId() string { + if m != nil { + return m.LatestFailBuildId + } + return "" +} + +func (m *AlertInfo) GetLatestFailTestId() string { + if m != nil { + return m.LatestFailTestId + } + return "" +} + +func (m *AlertInfo) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *AlertInfo) GetHotlistIds() []string { + if m != nil { + return m.HotlistIds + } + return nil +} + +// Info on default test metadata for a dashboard tab. +type TestMetadata struct { + // Name of the test with associated test metadata. + TestName string `protobuf:"bytes,1,opt,name=test_name,json=testName,proto3" json:"test_name,omitempty"` + // Default bug component. + BugComponent int32 `protobuf:"varint,2,opt,name=bug_component,json=bugComponent,proto3" json:"bug_component,omitempty"` + // Default owner. 
+ Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Default list of cc's. + Cc []string `protobuf:"bytes,4,rep,name=cc,proto3" json:"cc,omitempty"` + // When present, only file a bug for failed tests with same error type. + // Otherwise, always file a bug. + ErrorType string `protobuf:"bytes,5,opt,name=error_type,json=errorType,proto3" json:"error_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestMetadata) Reset() { *m = TestMetadata{} } +func (m *TestMetadata) String() string { return proto.CompactTextString(m) } +func (*TestMetadata) ProtoMessage() {} +func (*TestMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{4} +} + +func (m *TestMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestMetadata.Unmarshal(m, b) +} +func (m *TestMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestMetadata.Marshal(b, m, deterministic) +} +func (m *TestMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestMetadata.Merge(m, src) +} +func (m *TestMetadata) XXX_Size() int { + return xxx_messageInfo_TestMetadata.Size(m) +} +func (m *TestMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_TestMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_TestMetadata proto.InternalMessageInfo + +func (m *TestMetadata) GetTestName() string { + if m != nil { + return m.TestName + } + return "" +} + +func (m *TestMetadata) GetBugComponent() int32 { + if m != nil { + return m.BugComponent + } + return 0 +} + +func (m *TestMetadata) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *TestMetadata) GetCc() []string { + if m != nil { + return m.Cc + } + return nil +} + +func (m *TestMetadata) GetErrorType() string { + if m != nil { + return m.ErrorType + } + return "" +} + +// TestGrid columns (also known as TestCycle). +type Column struct { + // Unique instance of the job, typically BUILD_NUMBER from prow or a guid + Build string `protobuf:"bytes,1,opt,name=build,proto3" json:"build,omitempty"` + // Name associated with the column (such as the run/invocation ID).No two + // columns should have the same build_id and name. The name field allows the + // display of multiple columns with the same build_id. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Milliseconds since start of epoch (python time.time() * 1000) + Started float64 `protobuf:"fixed64,3,opt,name=started,proto3" json:"started,omitempty"` + // Additional custom headers like commit, image used, etc. + Extra []string `protobuf:"bytes,4,rep,name=extra,proto3" json:"extra,omitempty"` + // Custom hotlist ids. + HotlistIds string `protobuf:"bytes,5,opt,name=hotlist_ids,json=hotlistIds,proto3" json:"hotlist_ids,omitempty"` + // An optional hint for the updater. 
+ Hint string `protobuf:"bytes,6,opt,name=hint,proto3" json:"hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Column) Reset() { *m = Column{} } +func (m *Column) String() string { return proto.CompactTextString(m) } +func (*Column) ProtoMessage() {} +func (*Column) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{5} +} + +func (m *Column) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Column.Unmarshal(m, b) +} +func (m *Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Column.Marshal(b, m, deterministic) +} +func (m *Column) XXX_Merge(src proto.Message) { + xxx_messageInfo_Column.Merge(m, src) +} +func (m *Column) XXX_Size() int { + return xxx_messageInfo_Column.Size(m) +} +func (m *Column) XXX_DiscardUnknown() { + xxx_messageInfo_Column.DiscardUnknown(m) +} + +var xxx_messageInfo_Column proto.InternalMessageInfo + +func (m *Column) GetBuild() string { + if m != nil { + return m.Build + } + return "" +} + +func (m *Column) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Column) GetStarted() float64 { + if m != nil { + return m.Started + } + return 0 +} + +func (m *Column) GetExtra() []string { + if m != nil { + return m.Extra + } + return nil +} + +func (m *Column) GetHotlistIds() string { + if m != nil { + return m.HotlistIds + } + return "" +} + +func (m *Column) GetHint() string { + if m != nil { + return m.Hint + } + return "" +} + +// TestGrid rows (also known as TestRow) +type Row struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // Results for this row, run-length encoded to reduce size/improve + // performance. Thus (encoded -> decoded equivalent): + // [0, 3, 5, 4] -> [0, 0, 0, 5, 5, 5, 5] + // [5, 1] -> [5] + // [1, 5] -> [1, 1, 1, 1, 1] + // The decoded values are Result enums + Results []int32 `protobuf:"varint,3,rep,packed,name=results,proto3" json:"results,omitempty"` + // Test IDs for each test result in this test case. + // Must be present on every column, regardless of status. + CellIds []string `protobuf:"bytes,4,rep,name=cell_ids,json=cellIds,proto3" json:"cell_ids,omitempty"` + // Short description of the result, displayed on mouseover. + // Present for any column with a non-empty status (not NO_RESULT). + Messages []string `protobuf:"bytes,5,rep,name=messages,proto3" json:"messages,omitempty"` + // Names of metrics associated with this test case. Stored separate from + // metric info (which may be omitted). + Metric []string `protobuf:"bytes,7,rep,name=metric,proto3" json:"metric,omitempty"` + Metrics []*Metric `protobuf:"bytes,8,rep,name=metrics,proto3" json:"metrics,omitempty"` + // Short string to place inside the cell (F for fail, etc) + // Present for any column with a non-empty status (not NO_RESULT). + Icons []string `protobuf:"bytes,9,rep,name=icons,proto3" json:"icons,omitempty"` + // IDs for bugs associated with results in this test case. + BugId []string `protobuf:"bytes,10,rep,name=bug_id,json=bugId,proto3" json:"bug_id,omitempty"` + // An alert for the failure if there's a recent failure for this test case. + AlertInfo *AlertInfo `protobuf:"bytes,11,opt,name=alert_info,json=alertInfo,proto3" json:"alert_info,omitempty"` + // Values of a user-defined property found in test results for this row. 
+ UserProperty []string `protobuf:"bytes,12,rep,name=user_property,json=userProperty,proto3" json:"user_property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{6} +} + +func (m *Row) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Row.Unmarshal(m, b) +} +func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Row.Marshal(b, m, deterministic) +} +func (m *Row) XXX_Merge(src proto.Message) { + xxx_messageInfo_Row.Merge(m, src) +} +func (m *Row) XXX_Size() int { + return xxx_messageInfo_Row.Size(m) +} +func (m *Row) XXX_DiscardUnknown() { + xxx_messageInfo_Row.DiscardUnknown(m) +} + +var xxx_messageInfo_Row proto.InternalMessageInfo + +func (m *Row) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Row) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Row) GetResults() []int32 { + if m != nil { + return m.Results + } + return nil +} + +func (m *Row) GetCellIds() []string { + if m != nil { + return m.CellIds + } + return nil +} + +func (m *Row) GetMessages() []string { + if m != nil { + return m.Messages + } + return nil +} + +func (m *Row) GetMetric() []string { + if m != nil { + return m.Metric + } + return nil +} + +func (m *Row) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *Row) GetIcons() []string { + if m != nil { + return m.Icons + } + return nil +} + +func (m *Row) GetBugId() []string { + if m != nil { + return m.BugId + } + return nil +} + +func (m *Row) GetAlertInfo() *AlertInfo { + if m != nil { + return m.AlertInfo + } + return nil +} + +func (m *Row) GetUserProperty() []string { + if m != nil { + return m.UserProperty + } + return nil +} + +// A single table of test results backing a dashboard tab. +type Grid struct { + // A cycle of test results, not including the results. In the TestGrid client, + // the cycles define the columns. + Columns []*Column `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` + // A test case with test results. In the TestGrid client, the cases define the + // rows (and the results define the individual cells). + Rows []*Row `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"` + // The latest configuration used to generate this test group. + Config *config.TestGroup `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + // Seconds since epoch for last time this cycle was updated. + LastTimeUpdated float64 `protobuf:"fixed64,6,opt,name=last_time_updated,json=lastTimeUpdated,proto3" json:"last_time_updated,omitempty"` + // Stored info on previous timing for parts of the update cycle. + UpdateInfo []*UpdateInfo `protobuf:"bytes,8,rep,name=update_info,json=updateInfo,proto3" json:"update_info,omitempty"` + // Stored info on default test metadata. + TestMetadata []*TestMetadata `protobuf:"bytes,9,rep,name=test_metadata,json=testMetadata,proto3" json:"test_metadata,omitempty"` + // Clusters of failures for a TestResultTable instance. + Cluster []*Cluster `protobuf:"bytes,10,rep,name=cluster,proto3" json:"cluster,omitempty"` + // Most recent timestamp that clusters have processed. 
+ MostRecentClusterTimestamp float64 `protobuf:"fixed64,11,opt,name=most_recent_cluster_timestamp,json=mostRecentClusterTimestamp,proto3" json:"most_recent_cluster_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Grid) Reset() { *m = Grid{} } +func (m *Grid) String() string { return proto.CompactTextString(m) } +func (*Grid) ProtoMessage() {} +func (*Grid) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{7} +} + +func (m *Grid) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Grid.Unmarshal(m, b) +} +func (m *Grid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Grid.Marshal(b, m, deterministic) +} +func (m *Grid) XXX_Merge(src proto.Message) { + xxx_messageInfo_Grid.Merge(m, src) +} +func (m *Grid) XXX_Size() int { + return xxx_messageInfo_Grid.Size(m) +} +func (m *Grid) XXX_DiscardUnknown() { + xxx_messageInfo_Grid.DiscardUnknown(m) +} + +var xxx_messageInfo_Grid proto.InternalMessageInfo + +func (m *Grid) GetColumns() []*Column { + if m != nil { + return m.Columns + } + return nil +} + +func (m *Grid) GetRows() []*Row { + if m != nil { + return m.Rows + } + return nil +} + +func (m *Grid) GetConfig() *config.TestGroup { + if m != nil { + return m.Config + } + return nil +} + +func (m *Grid) GetLastTimeUpdated() float64 { + if m != nil { + return m.LastTimeUpdated + } + return 0 +} + +func (m *Grid) GetUpdateInfo() []*UpdateInfo { + if m != nil { + return m.UpdateInfo + } + return nil +} + +func (m *Grid) GetTestMetadata() []*TestMetadata { + if m != nil { + return m.TestMetadata + } + return nil +} + +func (m *Grid) GetCluster() []*Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *Grid) GetMostRecentClusterTimestamp() float64 { + if m != nil { + return m.MostRecentClusterTimestamp + } + return 0 +} + +// A cluster of failures grouped by test status and message for a test results +// table. +type Cluster struct { + // Test status cluster grouped by. + TestStatus int32 `protobuf:"varint,1,opt,name=test_status,json=testStatus,proto3" json:"test_status,omitempty"` + // Error message or testFailureClassification string cluster grouped by. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // ClusterRows that belong to this cluster. 
+ ClusterRow []*ClusterRow `protobuf:"bytes,3,rep,name=cluster_row,json=clusterRow,proto3" json:"cluster_row,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{8} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetTestStatus() int32 { + if m != nil { + return m.TestStatus + } + return 0 +} + +func (m *Cluster) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Cluster) GetClusterRow() []*ClusterRow { + if m != nil { + return m.ClusterRow + } + return nil +} + +// Cells in a TestRow that belong to a specific Cluster. +type ClusterRow struct { + // Name of TestRow. + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Index within row that belongs to Cluster (refer to columns of the row). + Index []int32 `protobuf:"varint,2,rep,packed,name=index,proto3" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterRow) Reset() { *m = ClusterRow{} } +func (m *ClusterRow) String() string { return proto.CompactTextString(m) } +func (*ClusterRow) ProtoMessage() {} +func (*ClusterRow) Descriptor() ([]byte, []int) { + return fileDescriptor_a888679467bb7853, []int{9} +} + +func (m *ClusterRow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterRow.Unmarshal(m, b) +} +func (m *ClusterRow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterRow.Marshal(b, m, deterministic) +} +func (m *ClusterRow) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRow.Merge(m, src) +} +func (m *ClusterRow) XXX_Size() int { + return xxx_messageInfo_ClusterRow.Size(m) +} +func (m *ClusterRow) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRow.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRow proto.InternalMessageInfo + +func (m *ClusterRow) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *ClusterRow) GetIndex() []int32 { + if m != nil { + return m.Index + } + return nil +} + +func init() { + proto.RegisterType((*Metric)(nil), "Metric") + proto.RegisterType((*UpdatePhaseData)(nil), "UpdatePhaseData") + proto.RegisterType((*UpdateInfo)(nil), "UpdateInfo") + proto.RegisterType((*AlertInfo)(nil), "AlertInfo") + proto.RegisterMapType((map[string]string)(nil), "AlertInfo.PropertiesEntry") + proto.RegisterType((*TestMetadata)(nil), "TestMetadata") + proto.RegisterType((*Column)(nil), "Column") + proto.RegisterType((*Row)(nil), "Row") + proto.RegisterType((*Grid)(nil), "Grid") + proto.RegisterType((*Cluster)(nil), "Cluster") + 
proto.RegisterType((*ClusterRow)(nil), "ClusterRow") +} + +func init() { proto.RegisterFile("state.proto", fileDescriptor_a888679467bb7853) } + +var fileDescriptor_a888679467bb7853 = []byte{ + // 1076 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0x6f, 0x8f, 0xdb, 0xc4, + 0x13, 0x96, 0xf3, 0xd7, 0x1e, 0x27, 0x77, 0xe9, 0xfe, 0xfa, 0xab, 0x4c, 0x50, 0xd5, 0xd4, 0x45, + 0x10, 0x10, 0xf8, 0xa4, 0xf0, 0x02, 0x54, 0xc1, 0x8b, 0x72, 0x94, 0xea, 0x4e, 0xb4, 0xaa, 0xb6, + 0xd7, 0xd7, 0x96, 0x63, 0xef, 0xa5, 0x56, 0x1d, 0xaf, 0xb5, 0xbb, 0x26, 0x97, 0x0f, 0x82, 0xe0, + 0xa3, 0xf0, 0xa9, 0xf8, 0x0c, 0x68, 0x66, 0xd7, 0x49, 0x5a, 0x21, 0xf1, 0x2a, 0x9e, 0x67, 0x66, + 0x77, 0x26, 0x33, 0xcf, 0x3c, 0x0b, 0xa1, 0x36, 0x99, 0x11, 0x49, 0xa3, 0xa4, 0x91, 0xf3, 0x47, + 0x1b, 0x29, 0x37, 0x95, 0xb8, 0x20, 0x6b, 0xdd, 0xde, 0x5e, 0x98, 0x72, 0x2b, 0xb4, 0xc9, 0xb6, + 0x8d, 0x0b, 0x78, 0xd0, 0xac, 0x2f, 0x72, 0x59, 0xdf, 0x96, 0x1b, 0xf7, 0x63, 0xf1, 0xf8, 0x15, + 0x8c, 0x5e, 0x0a, 0xa3, 0xca, 0x9c, 0x31, 0x18, 0xd4, 0xd9, 0x56, 0x44, 0xde, 0xc2, 0x5b, 0x06, + 0x9c, 0xbe, 0x59, 0x04, 0xe3, 0xb2, 0x2e, 0xca, 0x5c, 0xe8, 0xa8, 0xb7, 0xe8, 0x2f, 0x87, 0xbc, + 0x33, 0xd9, 0x03, 0x18, 0xfd, 0x96, 0x55, 0xad, 0xd0, 0x51, 0x7f, 0xd1, 0x5f, 0x7a, 0xdc, 0x59, + 0xf1, 0x5b, 0x38, 0x7f, 0xdb, 0x14, 0x99, 0x11, 0xaf, 0xdf, 0x65, 0x5a, 0xfc, 0x9c, 0x99, 0x8c, + 0x3d, 0x04, 0x68, 0xd0, 0x48, 0x4f, 0xae, 0x0f, 0x08, 0x79, 0x85, 0x39, 0x9e, 0xc0, 0xd4, 0xba, + 0xb5, 0xc8, 0x65, 0x5d, 0x60, 0x26, 0x6f, 0xe9, 0xf1, 0x09, 0x81, 0x6f, 0x2c, 0x16, 0x5f, 0x03, + 0xd8, 0x6b, 0xaf, 0xea, 0x5b, 0xc9, 0x7e, 0x80, 0x7b, 0x2d, 0x59, 0xa9, 0x3d, 0x59, 0x64, 0x26, + 0x8b, 0xbc, 0x45, 0x7f, 0x19, 0xae, 0x66, 0xc9, 0x47, 0xe9, 0xf9, 0x79, 0xfb, 0x21, 0x10, 0xff, + 0x39, 0x84, 0xe0, 0x59, 0x25, 0x94, 0xa1, 0xbb, 0x1e, 0x02, 0xdc, 0x66, 0x65, 0x95, 0xe6, 0xb2, + 0xad, 0x0d, 0x55, 0x37, 0xe4, 0x01, 0x22, 0x97, 0x08, 0xb0, 0x18, 0xa6, 0xe4, 0x5e, 0xb7, 0x65, + 0x55, 0xa4, 0x65, 0x41, 0xd5, 0x05, 0x3c, 0x44, 0xf0, 0x27, 0xc4, 0xae, 0x0a, 0xf6, 0x1d, 0xd0, + 0x81, 0x14, 0x7b, 0x1e, 0xf5, 0x17, 0xde, 0x32, 0x5c, 0xcd, 0x13, 0x3b, 0x90, 0xa4, 0x1b, 0x48, + 0x72, 0xd3, 0x0d, 0x84, 0xfb, 0x18, 0x8c, 0x26, 0x5b, 0xc0, 0xc4, 0x1e, 0x14, 0xda, 0xe0, 0xdd, + 0x03, 0xba, 0x9b, 0xea, 0xb9, 0x11, 0xda, 0x5c, 0x15, 0x98, 0xbe, 0xc9, 0xb4, 0x3e, 0xa6, 0x1f, + 0xda, 0xf4, 0x08, 0x9e, 0xa4, 0xa7, 0x18, 0x4a, 0x3f, 0xfa, 0xef, 0xf4, 0x18, 0x4c, 0xe9, 0xbf, + 0x80, 0x73, 0x4c, 0xd5, 0x2a, 0x91, 0x6e, 0x85, 0xd6, 0xd9, 0x46, 0x44, 0x63, 0xba, 0xfe, 0xcc, + 0xc1, 0x2f, 0x2d, 0x8a, 0x3d, 0xb2, 0x05, 0x54, 0x65, 0xfd, 0x3e, 0xf2, 0xed, 0x04, 0x09, 0xf9, + 0xb5, 0xac, 0xdf, 0xb3, 0xcf, 0xe1, 0xfc, 0xe8, 0x4e, 0x8d, 0xb8, 0x33, 0x51, 0x40, 0x31, 0xd3, + 0x43, 0xcc, 0x8d, 0xb8, 0x33, 0xec, 0x33, 0x38, 0xb3, 0x71, 0xad, 0xaa, 0x6c, 0x18, 0x50, 0xd8, + 0x84, 0xd0, 0xb7, 0xaa, 0xa2, 0xa8, 0x0b, 0xb8, 0x5f, 0x65, 0xd4, 0x91, 0x0f, 0x1b, 0x1f, 0x52, + 0xec, 0x3d, 0xeb, 0xfb, 0xe5, 0xa4, 0xfd, 0xdf, 0xc0, 0xff, 0x4e, 0x0f, 0x74, 0xcd, 0x3c, 0xa3, + 0xf8, 0xd9, 0x31, 0xde, 0xb5, 0xf4, 0x29, 0x40, 0xa3, 0x64, 0x23, 0x94, 0x29, 0x85, 0x8e, 0x26, + 0xc4, 0x9a, 0x79, 0x72, 0x20, 0x44, 0xf2, 0xfa, 0xe0, 0x7c, 0x5e, 0x1b, 0xb5, 0xe7, 0x27, 0xd1, + 0xec, 0x11, 0x84, 0xef, 0xa4, 0xa9, 0x4a, 0xca, 0xa0, 0xa3, 0xe9, 0xa2, 0x8f, 0xf3, 0x72, 0xd0, + 0x55, 0xa1, 0xe7, 0x3f, 0xc2, 0xf9, 0x47, 0xe7, 0xd9, 0x0c, 0xfa, 0xef, 0xc5, 0xde, 0xf1, 0x1e, + 0x3f, 0xd9, 0x7d, 0x18, 0xd2, 0xb6, 0x38, 0x2e, 0x59, 0xe3, 0x69, 0xef, 0x7b, 
0x2f, 0xfe, 0xdd, + 0x83, 0x09, 0x96, 0xf9, 0x52, 0x98, 0x0c, 0x49, 0xcd, 0x3e, 0x85, 0x80, 0xfe, 0xcf, 0xc9, 0xea, + 0xf8, 0x08, 0x74, 0x9b, 0xb3, 0x6e, 0x37, 0x69, 0x2e, 0xb7, 0x8d, 0xac, 0x45, 0x6d, 0xe8, 0xbe, + 0x21, 0xb6, 0x73, 0x73, 0xd9, 0x61, 0x98, 0x4c, 0xee, 0x6a, 0xa1, 0x88, 0x98, 0x01, 0xb7, 0x06, + 0x3b, 0x83, 0x5e, 0x9e, 0x47, 0x03, 0xaa, 0xbf, 0x97, 0xe7, 0x38, 0x61, 0xa1, 0x94, 0x54, 0xa9, + 0xd9, 0x37, 0xc2, 0x91, 0x2c, 0x20, 0xe4, 0x66, 0xdf, 0x88, 0xf8, 0x0f, 0x0f, 0x46, 0x97, 0xb2, + 0x6a, 0xb7, 0x35, 0xde, 0x47, 0x23, 0x71, 0xd5, 0x58, 0xe3, 0x20, 0x1e, 0xbd, 0x0f, 0xc5, 0x43, + 0x9b, 0x4c, 0x19, 0x51, 0x50, 0x6e, 0x8f, 0x77, 0x26, 0xde, 0x21, 0xee, 0x8c, 0xca, 0x5c, 0x01, + 0xd6, 0xf8, 0xb8, 0xb9, 0xb6, 0x88, 0x93, 0xe6, 0x62, 0x92, 0x77, 0x65, 0x6d, 0x88, 0xe3, 0x01, + 0xa7, 0xef, 0xf8, 0xaf, 0x1e, 0xf4, 0xb9, 0xdc, 0xfd, 0xab, 0x7a, 0x9d, 0x41, 0xef, 0xb0, 0xb0, + 0xbd, 0xb2, 0xc0, 0x82, 0x94, 0xd0, 0x6d, 0x65, 0xac, 0x68, 0x0d, 0x79, 0x67, 0xb2, 0x4f, 0xc0, + 0xcf, 0x45, 0x55, 0x51, 0x5e, 0x5b, 0xd3, 0x18, 0x6d, 0x4c, 0x3a, 0x07, 0xdf, 0x2d, 0x07, 0x96, + 0x84, 0xae, 0x83, 0x8d, 0x22, 0xb8, 0x25, 0xf1, 0x8c, 0xc6, 0xe4, 0x71, 0x16, 0x7b, 0x0c, 0x63, + 0xfb, 0xa5, 0x23, 0x9f, 0xf8, 0x35, 0x4e, 0xac, 0xc8, 0xf2, 0x0e, 0xc7, 0x16, 0x94, 0xb9, 0xac, + 0x75, 0x14, 0xd8, 0x16, 0x90, 0xc1, 0xfe, 0x0f, 0x23, 0x9c, 0x68, 0x59, 0x44, 0x60, 0xe1, 0x75, + 0xbb, 0xb9, 0x2a, 0xd8, 0x97, 0x00, 0x19, 0xf2, 0x33, 0x2d, 0xeb, 0x5b, 0x49, 0x8b, 0x10, 0xae, + 0xe0, 0x48, 0x59, 0x1e, 0x64, 0x07, 0x39, 0x7b, 0x02, 0xd3, 0x56, 0x0b, 0x95, 0x3a, 0xd2, 0xee, + 0x89, 0xe0, 0x01, 0x9f, 0x20, 0xe8, 0x98, 0xb9, 0xbf, 0x1e, 0xf8, 0xa3, 0xd9, 0x38, 0xfe, 0xbb, + 0x07, 0x83, 0x17, 0xaa, 0x2c, 0xb0, 0xdc, 0x9c, 0x86, 0xab, 0x9d, 0x88, 0x8e, 0x13, 0x3b, 0x6c, + 0xde, 0xe1, 0x2c, 0x82, 0x81, 0x92, 0x3b, 0xfb, 0x0a, 0x84, 0xab, 0x41, 0xc2, 0xe5, 0x8e, 0x13, + 0xc2, 0x62, 0x18, 0xd9, 0x07, 0x85, 0xd4, 0x0b, 0xeb, 0x42, 0x02, 0xbf, 0x50, 0xb2, 0x6d, 0xb8, + 0xf3, 0xb0, 0xaf, 0xe0, 0x5e, 0x95, 0x69, 0x43, 0x0a, 0x95, 0x5a, 0x39, 0x2e, 0x68, 0x8a, 0x1e, + 0x3f, 0x47, 0x07, 0xaa, 0x91, 0x95, 0xed, 0x82, 0x7d, 0x0d, 0xa1, 0xd3, 0x76, 0xfa, 0xb3, 0xb6, + 0x7f, 0x61, 0x72, 0x54, 0x7f, 0x0e, 0xed, 0xf1, 0x25, 0x58, 0xc1, 0x94, 0xf6, 0x63, 0xeb, 0x16, + 0x86, 0xda, 0x19, 0xae, 0xa6, 0xc9, 0xe9, 0x16, 0xf1, 0x89, 0x39, 0xdd, 0xa9, 0x18, 0xc6, 0x79, + 0xd5, 0x6a, 0x23, 0x14, 0x75, 0x39, 0x5c, 0xf9, 0xc9, 0xa5, 0xb5, 0x79, 0xe7, 0x60, 0xcf, 0xe0, + 0xe1, 0x56, 0x6a, 0x93, 0x2a, 0x91, 0x8b, 0xda, 0xa4, 0x0e, 0x4e, 0x0f, 0xaf, 0x2a, 0x0d, 0xc1, + 0xe3, 0x73, 0x0c, 0xe2, 0x14, 0xe3, 0xae, 0x38, 0xe8, 0xec, 0xf5, 0xc0, 0xef, 0xcf, 0x06, 0xd7, + 0x03, 0x7f, 0x38, 0x1b, 0x5d, 0x0f, 0xfc, 0xf1, 0xcc, 0x8f, 0x15, 0x8c, 0x5d, 0x14, 0x72, 0x9d, + 0xea, 0xc6, 0x37, 0xbc, 0xd5, 0xee, 0xd9, 0x01, 0x84, 0xde, 0x10, 0x82, 0x5c, 0xed, 0x34, 0xd9, + 0x12, 0xb8, 0x33, 0xb1, 0x41, 0x5d, 0x39, 0x4a, 0xee, 0x88, 0xc9, 0xd8, 0xa0, 0xee, 0x2f, 0xc8, + 0x1d, 0x87, 0xfc, 0xf0, 0x1d, 0x3f, 0x07, 0x38, 0x7a, 0xd8, 0x63, 0x98, 0x14, 0xa5, 0x6e, 0xaa, + 0x6c, 0x7f, 0xaa, 0x28, 0xa1, 0xc3, 0x48, 0x54, 0x90, 0x98, 0x75, 0x21, 0xee, 0xdc, 0x83, 0x6f, + 0x8d, 0xf5, 0x88, 0x1e, 0x92, 0x6f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x27, 0x5e, 0xeb, 0xa9, + 0x75, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/state/state.proto b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/state/state.proto new file mode 100644 index 00000000000..8add849c6cb --- /dev/null +++ 
b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/state/state.proto @@ -0,0 +1,226 @@
+// Backing state for a test results table of a TestGrid dashboard tab.
+// TestState() is updated and stored by test-focused update_server.py.
+// Stored in GCS as "".
+
+// NOTE: Do NOT update this until you have updated the internal state.proto!
+
+syntax = "proto3";
+
+import "google/protobuf/timestamp.proto";
+import "pb/config/config.proto";
+
+// A metric and its values for each test cycle.
+message Metric {
+  string name = 1; // Name of metric, such as duration
+  // Sparse encoding of values. Indices is a list of <first index, count> pairs
+  // that details the columns with metric values. So given:
+  //   Indices: [0, 2, 6, 4]
+  //   Values: [0.1,0.2,6.1,6.2,6.3,6.4]
+  // Decoded 12-value equivalent is:
+  //   [0.1, 0.2, nil, nil, nil, nil, 6.1, 6.2, 6.3, 6.4, nil, nil, ...]
+  repeated int32 indices = 2; // n=index of first value, n+1=count of filled values
+  repeated double values = 3; // only present for columns with a metric value
+}
+
+message UpdatePhaseData {
+  // The name for a part of the update cycle.
+  string phase_name = 1;
+
+  // Time taken for a part of the update cycle, in seconds.
+  double phase_seconds = 2;
+}
+
+// Info on time taken to update test results during the last update cycle.
+message UpdateInfo {
+  // Metrics for how long parts of the update cycle take.
+  repeated UpdatePhaseData update_phase_data = 1;
+}
+
+// Info on a failing test row about the failure.
+message AlertInfo {
+  // Number of results that have failed.
+  int32 fail_count = 1;
+
+  // The build ID the test first failed at.
+  string fail_build_id = 2;
+
+  // The time the test first failed at.
+  google.protobuf.Timestamp fail_time = 3;
+
+  // The test ID for the first test failure.
+  string fail_test_id = 4;
+
+  // The build ID the test last passed at.
+  string pass_build_id = 5;
+
+  // The time the test last passed at.
+  google.protobuf.Timestamp pass_time = 6;
+
+  // A snippet explaining the failure.
+  string failure_message = 7;
+
+  // Link to search for build changes, internally a code-search link.
+  string build_link = 8;
+
+  // Text for option to search for build changes.
+  string build_link_text = 9;
+
+  // Text to display for link to search for build changes.
+  string build_url_text = 10;
+
+  // The build ID for the latest test failure. (Does not indicate the failure is
+  // 'over', just the latest test failure we found.)
+  string latest_fail_build_id = 11;
+
+  // The test ID for the latest test failure.
+  string latest_fail_test_id = 14;
+
+  // Maps (property name):(property value) for arbitrary alert properties.
+  map<string, string> properties = 12;
+
+  // A list of IDs for issue hotlists related to this failure.
+  repeated string hotlist_ids = 13;
+}
+
+// Info on default test metadata for a dashboard tab.
+message TestMetadata {
+  // Name of the test with associated test metadata.
+  string test_name = 1;
+
+  // Default bug component.
+  int32 bug_component = 2;
+
+  // Default owner.
+  string owner = 3;
+
+  // Default list of cc's.
+  repeated string cc = 4;
+
+  // When present, only file a bug for failed tests with same error type.
+  // Otherwise, always file a bug.
+  string error_type = 5;
+}
+
+// TestGrid columns (also known as TestCycle).
+message Column {
+  // Unique instance of the job, typically BUILD_NUMBER from prow or a guid
+  string build = 1;
+
+  // Name associated with the column (such as the run/invocation ID). No two
+  // columns should have the same build_id and name.
The name field allows the + // display of multiple columns with the same build_id. + string name = 2; + + // Milliseconds since start of epoch (python time.time() * 1000) + double started = 3; + + // Additional custom headers like commit, image used, etc. + repeated string extra = 4; + + // Custom hotlist ids. + string hotlist_ids = 5; + + // An optional hint for the updater. + string hint = 6; +} + +// TestGrid rows (also known as TestRow) +message Row { + string name = 1; // Display name, which might process id to append/filter info. + string id = 2; // raw id for the row, such as the bazel target or golang package. + + // Results for this row, run-length encoded to reduce size/improve + // performance. Thus (encoded -> decoded equivalent): + // [0, 3, 5, 4] -> [0, 0, 0, 5, 5, 5, 5] + // [5, 1] -> [5] + // [1, 5] -> [1, 1, 1, 1, 1] + // The decoded values are Result enums + repeated int32 results = 3; + + // Test IDs for each test result in this test case. + // Must be present on every column, regardless of status. + repeated string cell_ids = 4; + + // Short description of the result, displayed on mouseover. + // Present for any column with a non-empty status (not NO_RESULT). + repeated string messages = 5; + + reserved 6; + + // Names of metrics associated with this test case. Stored separate from + // metric info (which may be omitted). + repeated string metric = 7; + + repeated Metric metrics = 8; // Numerical performance/timing data, etc. + + // Short string to place inside the cell (F for fail, etc) + // Present for any column with a non-empty status (not NO_RESULT). + repeated string icons = 9; + + // IDs for bugs associated with results in this test case. + repeated string bug_id = 10; + + // An alert for the failure if there's a recent failure for this test case. + AlertInfo alert_info = 11; + + // Values of a user-defined property found in test results for this row. + repeated string user_property = 12; +} + +// A single table of test results backing a dashboard tab. +message Grid { + // A cycle of test results, not including the results. In the TestGrid client, + // the cycles define the columns. + repeated Column columns = 1; + + // A test case with test results. In the TestGrid client, the cases define the + // rows (and the results define the individual cells). + repeated Row rows = 2; + + reserved 3; + + // The latest configuration used to generate this test group. + TestGroup config = 4; + + reserved 5; + + // Seconds since epoch for last time this cycle was updated. + double last_time_updated = 6; + + reserved 7; + + // Stored info on previous timing for parts of the update cycle. + repeated UpdateInfo update_info = 8; + + // Stored info on default test metadata. + repeated TestMetadata test_metadata = 9; + + // Clusters of failures for a TestResultTable instance. + repeated Cluster cluster = 10; + + // Most recent timestamp that clusters have processed. + double most_recent_cluster_timestamp = 11; +} + +// A cluster of failures grouped by test status and message for a test results +// table. +message Cluster { + // Test status cluster grouped by. + int32 test_status = 1; + + // Error message or testFailureClassification string cluster grouped by. + string message = 2; + + // ClusterRows that belong to this cluster. + repeated ClusterRow cluster_row = 3; +} + +// Cells in a TestRow that belong to a specific Cluster. +message ClusterRow { + // Name of TestRow. + string display_name = 1; + + // Index within row that belongs to Cluster (refer to columns of the row). 
+ repeated int32 index = 2; +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/test_status/test_status.pb.go b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/test_status/test_status.pb.go new file mode 100644 index 00000000000..beaa0acbab5 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/test_status/test_status.pb.go @@ -0,0 +1,132 @@ +/* +Copyright The TestGrid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: test_status.proto + +package test_status + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type TestStatus int32 + +const ( + // Proto versions of test_status.py's GathererStatus + // Note that: NO_RESULT is used to signal that there should be no change. + // This must be updated every time a new GathererStatus is added. 
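+	// The numeric values below mirror the TestStatus enum in test_status.proto
+	// and must stay in sync with it.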
+ TestStatus_NO_RESULT TestStatus = 0 + TestStatus_PASS TestStatus = 1 + TestStatus_PASS_WITH_ERRORS TestStatus = 2 + TestStatus_PASS_WITH_SKIPS TestStatus = 3 + TestStatus_RUNNING TestStatus = 4 + TestStatus_CATEGORIZED_ABORT TestStatus = 5 + TestStatus_UNKNOWN TestStatus = 6 + TestStatus_CANCEL TestStatus = 7 + TestStatus_BLOCKED TestStatus = 8 + TestStatus_TIMED_OUT TestStatus = 9 + TestStatus_CATEGORIZED_FAIL TestStatus = 10 + TestStatus_BUILD_FAIL TestStatus = 11 + TestStatus_FAIL TestStatus = 12 + TestStatus_FLAKY TestStatus = 13 + TestStatus_TOOL_FAIL TestStatus = 14 + TestStatus_BUILD_PASSED TestStatus = 15 +) + +var TestStatus_name = map[int32]string{ + 0: "NO_RESULT", + 1: "PASS", + 2: "PASS_WITH_ERRORS", + 3: "PASS_WITH_SKIPS", + 4: "RUNNING", + 5: "CATEGORIZED_ABORT", + 6: "UNKNOWN", + 7: "CANCEL", + 8: "BLOCKED", + 9: "TIMED_OUT", + 10: "CATEGORIZED_FAIL", + 11: "BUILD_FAIL", + 12: "FAIL", + 13: "FLAKY", + 14: "TOOL_FAIL", + 15: "BUILD_PASSED", +} + +var TestStatus_value = map[string]int32{ + "NO_RESULT": 0, + "PASS": 1, + "PASS_WITH_ERRORS": 2, + "PASS_WITH_SKIPS": 3, + "RUNNING": 4, + "CATEGORIZED_ABORT": 5, + "UNKNOWN": 6, + "CANCEL": 7, + "BLOCKED": 8, + "TIMED_OUT": 9, + "CATEGORIZED_FAIL": 10, + "BUILD_FAIL": 11, + "FAIL": 12, + "FLAKY": 13, + "TOOL_FAIL": 14, + "BUILD_PASSED": 15, +} + +func (x TestStatus) String() string { + return proto.EnumName(TestStatus_name, int32(x)) +} + +func (TestStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3f9a6ab41bff9dae, []int{0} +} + +func init() { + proto.RegisterEnum("TestStatus", TestStatus_name, TestStatus_value) +} + +func init() { proto.RegisterFile("test_status.proto", fileDescriptor_3f9a6ab41bff9dae) } + +var fileDescriptor_3f9a6ab41bff9dae = []byte{ + // 238 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x4d, 0x4e, 0x03, 0x31, + 0x0c, 0x85, 0xa1, 0xb4, 0xd3, 0x8e, 0xfb, 0xe7, 0x1a, 0xb8, 0x04, 0x0b, 0x36, 0x9c, 0x20, 0x33, + 0x93, 0x96, 0x68, 0x42, 0x5c, 0xe5, 0x47, 0x15, 0x6c, 0x22, 0x90, 0xba, 0x2e, 0x62, 0xc2, 0x11, + 0xb8, 0x37, 0x4a, 0x8a, 0x44, 0x77, 0xcf, 0xcf, 0xcf, 0x4f, 0x9f, 0x0c, 0x9b, 0x74, 0x1c, 0x52, + 0x1c, 0xd2, 0x7b, 0xfa, 0x1e, 0x1e, 0x3f, 0xbf, 0x4e, 0xe9, 0xf4, 0xf0, 0x33, 0x02, 0xf0, 0xc7, + 0x21, 0xb9, 0x62, 0xd2, 0x12, 0x6a, 0xc3, 0xd1, 0x4a, 0x17, 0xb4, 0xc7, 0x2b, 0x9a, 0xc1, 0x78, + 0x2f, 0x9c, 0xc3, 0x6b, 0xba, 0x03, 0xcc, 0x2a, 0x1e, 0x94, 0x7f, 0x8e, 0xd2, 0x5a, 0xb6, 0x0e, + 0x47, 0x74, 0x0b, 0xeb, 0x7f, 0xd7, 0xf5, 0x6a, 0xef, 0xf0, 0x86, 0xe6, 0x30, 0xb5, 0xc1, 0x18, + 0x65, 0x76, 0x38, 0xa6, 0x7b, 0xd8, 0xb4, 0xc2, 0xcb, 0x1d, 0x5b, 0xf5, 0x26, 0xbb, 0x28, 0x1a, + 0xb6, 0x1e, 0x27, 0x39, 0x13, 0x4c, 0x6f, 0xf8, 0x60, 0xb0, 0x22, 0x80, 0xaa, 0x15, 0xa6, 0x95, + 0x1a, 0xa7, 0x79, 0xd1, 0x68, 0x6e, 0x7b, 0xd9, 0xe1, 0x2c, 0xd3, 0x78, 0xf5, 0x22, 0xbb, 0xc8, + 0xc1, 0x63, 0x9d, 0x19, 0x2e, 0xbb, 0xb6, 0x42, 0x69, 0x04, 0x5a, 0x01, 0x34, 0x41, 0xe9, 0xbf, + 0x79, 0x9e, 0x99, 0x8b, 0x5a, 0x50, 0x0d, 0x93, 0xad, 0x16, 0xfd, 0x2b, 0x2e, 0x4b, 0x13, 0xb3, + 0x3e, 0x67, 0x56, 0x84, 0xb0, 0x38, 0xdf, 0x64, 0x7a, 0xd9, 0xe1, 0xfa, 0xa3, 0x2a, 0xef, 0x78, + 0xfa, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xb6, 0x9b, 0x15, 0x23, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/pb/test_status/test_status.proto b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/test_status/test_status.proto new file mode 100644 index 00000000000..64810577da0 --- /dev/null +++ 
b/vendor/github.com/GoogleCloudPlatform/testgrid/pb/test_status/test_status.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +enum TestStatus { + // Proto versions of test_status.py's GathererStatus + // Note that: NO_RESULT is used to signal that there should be no change. + // This must be updated every time a new GathererStatus is added. + NO_RESULT = 0; + PASS = 1; + PASS_WITH_ERRORS = 2; + PASS_WITH_SKIPS = 3; + RUNNING = 4; + CATEGORIZED_ABORT = 5; + UNKNOWN = 6; + CANCEL = 7; + BLOCKED = 8; + TIMED_OUT = 9; + CATEGORIZED_FAIL = 10; + BUILD_FAIL = 11; + FAIL = 12; + FLAKY = 13; + TOOL_FAIL = 14; + BUILD_PASSED = 15; +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/client.go b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/client.go new file mode 100644 index 00000000000..d2f9cbd33dd --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/client.go @@ -0,0 +1,134 @@ +/* +Copyright 2020 The TestGrid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcs + +import ( + "context" + "io" + + "cloud.google.com/go/storage" +) + +// Uploader adds upload capabilities to a GCS client. +type Uploader interface { + Upload(context.Context, Path, []byte, bool, string) error +} + +// Downloader can list files and open them for reading. +type Downloader interface { + Lister + Opener +} + +// A Lister returns objects under a prefix. +type Lister interface { + Objects(ctx context.Context, prefix Path, delimiter, start string) Iterator +} + +// An Iterator returns the attributes of the listed objects or an iterator.Done error. +type Iterator interface { + Next() (*storage.ObjectAttrs, error) +} + +// An Opener opens a path for reading. +type Opener interface { + Open(ctx context.Context, path Path) (io.ReadCloser, error) +} + +// A Stater can stat an object and get its attributes. +type Stater interface { + Stat(ctx context.Context, prefix Path) (*storage.ObjectAttrs, error) +} + +// A Copier can cloud copy an object to a new location. +type Copier interface { + // Copy an object to the specified path + Copy(ctx context.Context, from, to Path) error +} + +// A Client can upload, download and stat. +type Client interface { + Uploader + Downloader + Stater + Copier +} + +// A ConditionalClient can limit actions to those matching conditions. +type ConditionalClient interface { + Client + // If specifies conditions on the object read from and/or written to. + If(read, write *storage.Conditions) ConditionalClient +} + +type gcsClient struct { + gcs *realGCSClient + local *localClient +} + +// NewClient returns a flexible (local or GCS) storage client. +func NewClient(client *storage.Client) ConditionalClient { + return gcsClient{ + gcs: &realGCSClient{client, nil, nil}, + local: &localClient{nil, nil}, + } +} + +// If returns a flexible (local or GCS) conditional client. 
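+// Note that the conditions only carry over to the GCS-backed client; the
+// local filesystem client ignores them (see localClient.If in local_gcs.go).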
+func (gc gcsClient) If(read, write *storage.Conditions) ConditionalClient { + return gcsClient{ + gcs: &realGCSClient{gc.gcs.client, read, write}, + local: &localClient{nil, nil}, + } +} + +func (gc gcsClient) clientFromPath(path Path) ConditionalClient { + if path.URL().Scheme == "gs" { + return gc.gcs + } + return gc.local +} + +// Copy copies the contents of 'from' into 'to'. +func (gc gcsClient) Copy(ctx context.Context, from, to Path) error { + client := gc.clientFromPath(from) + return client.Copy(ctx, from, to) +} + +// Open returns a handle for a given path. +func (gc gcsClient) Open(ctx context.Context, path Path) (io.ReadCloser, error) { + client := gc.clientFromPath(path) + return client.Open(ctx, path) +} + +// Objects returns an iterator of objects under a given path. +func (gc gcsClient) Objects(ctx context.Context, path Path, delimiter, startOffset string) Iterator { + client := gc.clientFromPath(path) + return client.Objects(ctx, path, delimiter, startOffset) +} + +// Upload writes content to the given path. +func (gc gcsClient) Upload(ctx context.Context, path Path, buf []byte, worldReadable bool, cacheControl string) error { + client := gc.clientFromPath(path) + return client.Upload(ctx, path, buf, worldReadable, cacheControl) +} + +// Stat returns object attributes for a given path. +func (gc gcsClient) Stat(ctx context.Context, path Path) (*storage.ObjectAttrs, error) { + client := gc.clientFromPath(path) + return client.Stat(ctx, path) +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/gcs.go b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/gcs.go new file mode 100644 index 00000000000..97c09efa744 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/gcs.go @@ -0,0 +1,218 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gcs provides utilities for interacting with GCS. +// +// This includes basic CRUD operations. It is primarily focused on +// reading prow build results uploaded to GCS. +package gcs + +import ( + "compress/zlib" + "context" + "encoding/json" + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "log" + "net/url" + "strings" + + statepb "github.com/GoogleCloudPlatform/testgrid/pb/state" + + "cloud.google.com/go/storage" + "github.com/golang/protobuf/proto" + "google.golang.org/api/option" +) + +// ClientWithCreds returns a storage client, optionally authenticated with the specified .json creds +func ClientWithCreds(ctx context.Context, creds ...string) (*storage.Client, error) { + var options []option.ClientOption + switch l := len(creds); l { + case 0: // Do nothing + case 1: + options = append(options, option.WithCredentialsFile(creds[0])) + default: + return nil, fmt.Errorf("%d creds files unsupported (at most 1)", l) + } + return storage.NewClient(ctx, options...) +} + +// Path parses gs://bucket/obj urls +type Path struct { + url url.URL +} + +// NewPath returns a new Path if it parses. 
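+// For example, NewPath("gs://bucket/path/to/obj") yields a Path whose
+// Bucket() is "bucket" and whose Object() is "path/to/obj".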
+func NewPath(path string) (*Path, error) {
+	var p Path
+	err := p.Set(path)
+	if err != nil {
+		return nil, err
+	}
+	return &p, nil
+}
+
+// String returns the gs://bucket/obj url
+func (g Path) String() string {
+	return g.url.String()
+}
+
+// URL returns the url
+func (g Path) URL() url.URL {
+	return g.url
+}
+
+// Set updates value from a gs://bucket/obj string, validating errors.
+func (g *Path) Set(v string) error {
+	u, err := url.Parse(v)
+	if err != nil {
+		return fmt.Errorf("invalid gs:// url %s: %v", v, err)
+	}
+	return g.SetURL(u)
+}
+
+// SetURL updates value to the passed in gs://bucket/obj url
+func (g *Path) SetURL(u *url.URL) error {
+	switch {
+	case u == nil:
+		return errors.New("nil url")
+	case u.Scheme != "gs" && u.Scheme != "" && u.Scheme != "file":
+		return fmt.Errorf("must use a gs://, file://, or local filesystem url: %s", u)
+	case strings.Contains(u.Host, ":"):
+		return fmt.Errorf("gs://bucket may not contain a port: %s", u)
+	case u.Opaque != "":
+		return fmt.Errorf("url must start with gs://: %s", u)
+	case u.User != nil:
+		return fmt.Errorf("gs://bucket may not contain a user@ prefix: %s", u)
+	case u.RawQuery != "":
+		return fmt.Errorf("gs:// url may not contain a ?query suffix: %s", u)
+	case u.Fragment != "":
+		return fmt.Errorf("gs:// url may not contain a #fragment suffix: %s", u)
+	}
+	g.url = *u
+	return nil
+}
+
+// MarshalJSON encodes Path as a string
+func (g Path) MarshalJSON() ([]byte, error) {
+	return json.Marshal(g.String())
+}
+
+// UnmarshalJSON decodes a string into a Path
+func (g *Path) UnmarshalJSON(buf []byte) error {
+	var str string
+	err := json.Unmarshal(buf, &str)
+	if err != nil {
+		return err
+	}
+	if g == nil {
+		g = &Path{}
+	}
+	return g.Set(str)
+}
+
+// ResolveReference returns the path relative to the current path
+func (g Path) ResolveReference(ref *url.URL) (*Path, error) {
+	var newP Path
+	if err := newP.SetURL(g.url.ResolveReference(ref)); err != nil {
+		return nil, err
+	}
+	return &newP, nil
+}
+
+// Bucket returns bucket in gs://bucket/obj
+func (g Path) Bucket() string {
+	return g.url.Host
+}
+
+// Object returns path/to/something in gs://bucket/path/to/something
+func (g Path) Object() string {
+	if g.url.Path == "" {
+		return g.url.Path
+	}
+	return g.url.Path[1:]
+}
+
+func calcCRC(buf []byte) uint32 {
+	return crc32.Checksum(buf, crc32.MakeTable(crc32.Castagnoli))
+}
+
+const (
+	// DefaultACL for this upload
+	DefaultACL = false
+	// PublicRead ACL for this upload.
+	PublicRead = true
+)
+
+// Upload writes bytes to the specified Path by converting the client and path into an ObjectHandle.
+func Upload(ctx context.Context, client *storage.Client, path Path, buf []byte, worldReadable bool, cacheControl string) error {
+	return realGCSClient{client: client}.Upload(ctx, path, buf, worldReadable, cacheControl)
+}
+
+// UploadHandle writes bytes to the specified ObjectHandle
+func UploadHandle(ctx context.Context, handle *storage.ObjectHandle, buf []byte, worldReadable bool, cacheControl string) error {
+	crc := calcCRC(buf)
+	w := handle.NewWriter(ctx)
+	defer w.Close()
+	if worldReadable {
+		w.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}
+	}
+	if cacheControl != "" {
+		w.ObjectAttrs.CacheControl = cacheControl
+	}
+	w.SendCRC32C = true
+	// Send our CRC32 to ensure google received the same data we sent.
+	// See checksum example at:
+	// https://godoc.org/cloud.google.com/go/storage#Writer.Write
+	w.ObjectAttrs.CRC32C = crc
+	w.ProgressFunc = func(bytes int64) {
+		log.Printf("Uploading gs://%s/%s: %d/%d...", handle.BucketName(), handle.ObjectName(), bytes, len(buf))
+	}
+	if n, err := w.Write(buf); err != nil {
+		return fmt.Errorf("write: %w", err)
+	} else if n != len(buf) {
+		return fmt.Errorf("partial write: %d < %d", n, len(buf))
+	}
+	if err := w.Close(); err != nil {
+		return fmt.Errorf("close: %w", err)
+	}
+	return nil
+}
+
+// DownloadGrid downloads and decompresses a grid from the specified path.
+func DownloadGrid(ctx context.Context, opener Opener, path Path) (*statepb.Grid, error) {
+	var g statepb.Grid
+	r, err := opener.Open(ctx, path)
+	if err != nil && err == storage.ErrObjectNotExist {
+		return &g, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("open: %w", err)
+	}
+	defer r.Close()
+	zr, err := zlib.NewReader(r)
+	if err != nil {
+		return nil, fmt.Errorf("open zlib: %w", err)
+	}
+	pbuf, err := ioutil.ReadAll(zr)
+	if err != nil {
+		return nil, fmt.Errorf("decompress: %w", err)
+	}
+	err = proto.Unmarshal(pbuf, &g)
+	return &g, err
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/local_gcs.go b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/local_gcs.go
new file mode 100644
index 00000000000..93fde960b0f
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/local_gcs.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2021 The TestGrid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gcs
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	"google.golang.org/api/iterator"
+)
+
+var (
+	_ Client = &localClient{} // Ensure this implements interface
+)
+
+type localIterator struct {
+	files []os.FileInfo
+	dir   string
+	index int
+}
+
+func convertIsNotExistsErr(err error) error {
+	if os.IsNotExist(err) {
+		return storage.ErrObjectNotExist
+	}
+	return err
+}
+
+func cleanFilepath(path Path) string {
+	return strings.Replace(path.String(), "file://", "/", 1)
+}
+
+func (li *localIterator) Next() (*storage.ObjectAttrs, error) {
+	defer func() { li.index++ }()
+	if li.index >= len(li.files) {
+		return nil, iterator.Done
+	}
+	info := li.files[li.index]
+	p, err := NewPath(filepath.Join(li.dir, info.Name()))
+	if err != nil {
+		return nil, err
+	}
+	return objectAttrs(info, *p), nil
+}
+
+// NewLocalClient returns a ConditionalClient backed by the local filesystem.
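+// Paths are mapped onto the filesystem by cleanFilepath, which rewrites the
+// file:// prefix, so both file:// urls and plain local paths are accepted.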
+func NewLocalClient() ConditionalClient { + return localClient{nil, nil} +} + +type localClient struct { + readCond *storage.Conditions + writeCond *storage.Conditions +} + +func (lc localClient) If(_, _ *storage.Conditions) ConditionalClient { + return NewLocalClient() +} + +func (lc localClient) Copy(ctx context.Context, from, to Path) error { + buf, err := ioutil.ReadFile(cleanFilepath(from)) + if err != nil { + return err + } + return lc.Upload(ctx, to, buf, false, "") +} + +func (lc localClient) Open(ctx context.Context, path Path) (io.ReadCloser, error) { + return os.Open(cleanFilepath(path)) +} + +func (lc localClient) Objects(ctx context.Context, path Path, delimiter, startOffset string) Iterator { + p := cleanFilepath(path) + if !strings.HasSuffix(p, "/") { + p += "/" + } + files, err := ioutil.ReadDir(p) + if err != nil { + return &localIterator{} + } + return &localIterator{ + dir: filepath.Dir(p), + files: files, + } +} + +func (lc localClient) Upload(ctx context.Context, path Path, buf []byte, _ bool, _ string) error { + return ioutil.WriteFile(cleanFilepath(path), buf, 0666) +} + +func (lc localClient) Stat(ctx context.Context, path Path) (*storage.ObjectAttrs, error) { + info, err := os.Stat(cleanFilepath(path)) + if err != nil { + return nil, convertIsNotExistsErr(err) + } + return objectAttrs(info, path), nil +} + +func objectAttrs(info os.FileInfo, path Path) *storage.ObjectAttrs { + return &storage.ObjectAttrs{ + Bucket: path.Bucket(), + Name: path.Object(), + Size: info.Size(), + Updated: info.ModTime(), + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/read.go b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/read.go new file mode 100644 index 00000000000..2cbe031e793 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/read.go @@ -0,0 +1,521 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "path" + "regexp" + "strconv" + "strings" + "sync" + + "cloud.google.com/go/storage" + "github.com/fvbommel/sortorder" + "google.golang.org/api/iterator" + core "k8s.io/api/core/v1" + + "github.com/GoogleCloudPlatform/testgrid/metadata" + "github.com/GoogleCloudPlatform/testgrid/metadata/junit" +) + +// PodInfo holds podinfo.json (data about the pod). +type PodInfo struct { + Pod *core.Pod `json:"pod,omitempty"` + // ignore unused events +} + +const ( + // MissingPodInfo appears when builds complete without a podinfo.json report. + MissingPodInfo = "podinfo.json not found, please install prow's GCS reporter" + // NoPodUtils appears when builds run without decoration. + NoPodUtils = "not using decoration, please set decorate: true on prowjob" +) + +func truncate(s string, max int) string { + if max <= 0 { + return s + } + l := len(s) + if l < max { + return s + } + h := max / 2 + return s[:h] + "..." 
+ s[l-h:] +} + +func checkContainerStatus(status core.ContainerStatus) (bool, string) { + name := status.Name + if status.State.Waiting != nil { + return false, fmt.Sprintf("%s still waiting: %s", name, status.State.Waiting.Message) + } + if status.State.Running != nil { + return false, fmt.Sprintf("%s still running", name) + } + if status.State.Terminated != nil && status.State.Terminated.ExitCode != 0 { + return false, fmt.Sprintf("%s exited %d: %s", name, status.State.Terminated.ExitCode, truncate(status.State.Terminated.Message, 140)) + } + return true, "" +} + +// Summarize returns if the pod completed successfully and a diagnostic message. +func (pi PodInfo) Summarize() (bool, string) { + if pi.Pod == nil { + return false, MissingPodInfo + } + + if pi.Pod.Status.Phase == core.PodSucceeded { + return true, "" + } + + conditions := make(map[core.PodConditionType]core.PodCondition, len(pi.Pod.Status.Conditions)) + + for _, cond := range pi.Pod.Status.Conditions { + conditions[cond.Type] = cond + } + + if cond, ok := conditions[core.PodScheduled]; ok && cond.Status != core.ConditionTrue { + return false, fmt.Sprintf("pod did not schedule: %s", cond.Message) + } + + if cond, ok := conditions[core.PodInitialized]; ok && cond.Status != core.ConditionTrue { + return false, fmt.Sprintf("pod could not initialize: %s", cond.Message) + } + + for _, status := range pi.Pod.Status.InitContainerStatuses { + if pass, msg := checkContainerStatus(status); !pass { + return pass, fmt.Sprintf("init container %s", msg) + } + } + + var foundSidecar bool + for _, status := range pi.Pod.Status.ContainerStatuses { + if status.Name == "sidecar" { + foundSidecar = true + } + pass, msg := checkContainerStatus(status) + if pass { + continue + } + if status.Name == "sidecar" { + return pass, msg + } + if status.State.Terminated == nil { + return pass, msg + } + } + + if !foundSidecar { + return true, NoPodUtils + } + return true, "" +} + +// Started holds started.json data. +type Started struct { + metadata.Started + // Pending when the job has not started yet + Pending bool +} + +// Finished holds finished.json data. +type Finished struct { + metadata.Finished + // Running when the job hasn't finished and finished.json doesn't exist + Running bool +} + +// Build points to a build stored under a particular gcs prefix. +type Build struct { + Path Path + baseName string + suitesConcurrency int // override the max number of concurrent suite downloads +} + +func (build Build) object() string { + o := build.Path.Object() + if strings.HasSuffix(o, "/") { + return o[0 : len(o)-1] + } + return o +} + +// Build is the unique invocation id of the job. +func (build Build) Build() string { + return path.Base(build.object()) +} + +// Job is the name of the job for this build +func (build Build) Job() string { + return path.Base(path.Dir(build.object())) +} + +func (build Build) String() string { + return build.Path.String() +} + +func readLink(objAttrs *storage.ObjectAttrs) string { + if link, ok := objAttrs.Metadata["x-goog-meta-link"]; ok { + return link + } + if link, ok := objAttrs.Metadata["link"]; ok { + return link + } + return "" +} + +// hackOffset handles tot's sequential names, which GCS handles poorly +// AKA asking GCS to return results after 6 will never find 10 +// So we always have to list everything for these types of numbers. 
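+// For example, an offset of "jobs/123" is rewritten to "jobs/0" so the
+// listing starts at the beginning, while the original base name "123" is
+// returned so ListBuilds can filter the results client-side.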
+func hackOffset(offset *string) string { + if *offset == "" { + return "" + } + offsetBaseName := path.Base(*offset) + const first = 1000000000000000000 + if n, err := strconv.Atoi(offsetBaseName); err == nil && n < first { + *offset = path.Join(path.Dir(*offset), "0") + } + return offsetBaseName +} + +// ListBuilds returns the array of builds under path, sorted in monotonically decreasing order. +func ListBuilds(parent context.Context, lister Lister, gcsPath Path, after *Path) ([]Build, error) { + ctx, cancel := context.WithCancel(parent) + defer cancel() + var offset string + if after != nil { + offset = after.Object() + } + offsetBaseName := hackOffset(&offset) + it := lister.Objects(ctx, gcsPath, "/", offset) + var all []Build + for { + objAttrs, err := it.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + return nil, fmt.Errorf("list objects: %w", err) + } + + // if this is a link under directory/, resolve the build value + // This is used for PR type jobs which we store in a PR specific prefix. + // The directory prefix contains a link header to the result + // under the PR specific prefix. + if link := readLink(objAttrs); link != "" { + // links created by bootstrap.py have a space + link = strings.TrimSpace(link) + u, err := url.Parse(link) + if err != nil { + return nil, fmt.Errorf("parse %s link: %v", objAttrs.Name, err) + } + if !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + var linkPath Path + if err := linkPath.SetURL(u); err != nil { + return nil, fmt.Errorf("bad %s link path %s: %w", objAttrs.Name, u, err) + } + all = append(all, Build{ + Path: linkPath, + baseName: path.Base(objAttrs.Name), + }) + continue + } + + if objAttrs.Prefix == "" { + continue // not a symlink to a directory + } + + loc := "gs://" + gcsPath.Bucket() + "/" + objAttrs.Prefix + gcsPath, err := NewPath(loc) + if err != nil { + return nil, fmt.Errorf("bad path %q: %w", loc, err) + } + + all = append(all, Build{ + Path: *gcsPath, + baseName: path.Base(objAttrs.Prefix), + }) + } + + Sort(all) + + if offsetBaseName != "" { + // GCS will return 200 2000 30 for a prefix of 100 + // testgrid expects this as 2000 200 (dropping 30) + for i, b := range all { + if sortorder.NaturalLess(b.baseName, offsetBaseName) || b.baseName == offsetBaseName { + return all[:i], nil // b <= offsetBaseName, so skip this one + } + } + } + return all, nil +} + +// junit_CONTEXT_TIMESTAMP_THREAD.xml +var re = regexp.MustCompile(`.+/junit((_[^_]+)?(_\d+-\d+)?(_\d+)?|.+)?\.xml$`) + +// dropPrefix removes the _ in _CONTEXT to help keep the regexp simple +func dropPrefix(name string) string { + if len(name) == 0 { + return name + } + return name[1:] +} + +// parseSuitesMeta returns the metadata for this junit file (nil for a non-junit file). +// +// Expected format: junit_context_20180102-1256_07.xml +// Results in { +// "Context": "context", +// "Timestamp": "20180102-1256", +// "Thread": "07", +// } +func parseSuitesMeta(name string) map[string]string { + mat := re.FindStringSubmatch(name) + if mat == nil { + return nil + } + c, ti, th := dropPrefix(mat[2]), dropPrefix(mat[3]), dropPrefix(mat[4]) + if c == "" && ti == "" && th == "" { + c = mat[1] + } + return map[string]string{ + "Context": c, + "Timestamp": ti, + "Thread": th, + } + +} + +// readJSON will decode the json object stored in GCS. 
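+// A missing object is reported by returning storage.ErrObjectNotExist
+// unwrapped, so callers can detect absent files with errors.Is.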
+func readJSON(ctx context.Context, opener Opener, p Path, i interface{}) error { + reader, err := opener.Open(ctx, p) + if errors.Is(err, storage.ErrObjectNotExist) { + return err + } + if err != nil { + return fmt.Errorf("open: %w", err) + } + defer reader.Close() + if err = json.NewDecoder(reader).Decode(i); err != nil { + return fmt.Errorf("decode: %w", err) + } + if err := reader.Close(); err != nil { + return fmt.Errorf("close: %w", err) + } + return nil +} + +// PodInfo parses the build's pod state. +func (build Build) PodInfo(ctx context.Context, opener Opener) (*PodInfo, error) { + path, err := build.Path.ResolveReference(&url.URL{Path: "podinfo.json"}) + if err != nil { + return nil, fmt.Errorf("resolve: %w", err) + } + var podInfo PodInfo + err = readJSON(ctx, opener, *path, &podInfo) + if errors.Is(err, storage.ErrObjectNotExist) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("read: %w", err) + } + return &podInfo, nil +} + +// Started parses the build's started metadata. +func (build Build) Started(ctx context.Context, opener Opener) (*Started, error) { + path, err := build.Path.ResolveReference(&url.URL{Path: "started.json"}) + if err != nil { + return nil, fmt.Errorf("resolve: %w", err) + } + var started Started + err = readJSON(ctx, opener, *path, &started) + if errors.Is(err, storage.ErrObjectNotExist) { + started.Pending = true + return &started, nil + } + if err != nil { + return nil, fmt.Errorf("read: %w", err) + } + return &started, nil +} + +// Finished parses the build's finished metadata. +func (build Build) Finished(ctx context.Context, opener Opener) (*Finished, error) { + path, err := build.Path.ResolveReference(&url.URL{Path: "finished.json"}) + if err != nil { + return nil, fmt.Errorf("resolve: %w", err) + } + var finished Finished + err = readJSON(ctx, opener, *path, &finished) + if errors.Is(err, storage.ErrObjectNotExist) { + finished.Running = true + return &finished, nil + } + if err != nil { + return nil, fmt.Errorf("read: %w", err) + } + return &finished, nil +} + +// Artifacts writes the object name of all paths under the build's artifact dir to the output channel. +func (build Build) Artifacts(ctx context.Context, lister Lister, artifacts chan<- string) error { + objs := lister.Objects(ctx, build.Path, "", "") // no delim or offset so we get all objects. + for { + obj, err := objs.Next() + if err == iterator.Done { + break + } + if err != nil { + return fmt.Errorf("list %s: %w", build.Path, err) + } + select { + case <-ctx.Done(): + return ctx.Err() + case artifacts <- obj.Name: + } + } + return nil +} + +// SuitesMeta holds testsuites xml and metadata from the filename +type SuitesMeta struct { + Suites junit.Suites // suites data extracted from file contents + Metadata map[string]string // metadata extracted from path name + Path string +} + +func readSuites(ctx context.Context, opener Opener, p Path) (*junit.Suites, error) { + r, err := opener.Open(ctx, p) + if err != nil { + return nil, fmt.Errorf("open: %w", err) + } + defer r.Close() + suitesMeta, err := junit.ParseStream(r) + if err != nil { + return nil, fmt.Errorf("parse: %w", err) + } + return suitesMeta, nil +} + +// Error wraps an error in an associated Path. +type Error struct { + Path + err error +} + +// Unwrap the underlying error +func (e Error) Unwrap() error { + return e.err +} + +// Error satisfies the error interface type. 
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Path, e.err)
+}
+
+// Suites takes a channel of artifact names, parses those representing junit suites, writing the result to the suites channel.
+//
+// Note that junit suites are parsed in parallel, so there are no guarantees about suites ordering.
+func (build Build) Suites(parent context.Context, opener Opener, artifacts <-chan string, suites chan<- SuitesMeta) error {
+	var wg sync.WaitGroup
+	var work int
+
+	ec := make(chan error)
+	ctx, cancel := context.WithCancel(parent)
+
+	// semaphore caps concurrency at size goroutines
+	size := build.suitesConcurrency
+	if size == 0 {
+		size = 5
+	}
+	semaphore := make(chan int, size)
+	defer close(semaphore) // close after all goroutines are done
+	defer wg.Wait()        // ensure all goroutines exit before returning
+	defer cancel()
+
+	for art := range artifacts {
+		meta := parseSuitesMeta(art)
+		if meta == nil {
+			continue // not a junit file, ignore it
+		}
+		// concurrently parse each file because there may be a lot of them, and
+		// each takes a non-trivial amount of time waiting for the network.
+		work++
+		wg.Add(1)
+
+		go func(art string, meta map[string]string) {
+			semaphore <- 1 // wait for free slot
+			defer wg.Done()
+			defer func() { <-semaphore }() // free up slot
+			if art != "" && art[0] != '/' {
+				art = "/" + art
+			}
+			path, err := build.Path.ResolveReference(&url.URL{Path: art})
+			if err != nil {
+				select {
+				case <-ctx.Done():
+				case ec <- fmt.Errorf("resolve %q: %w", art, err):
+				}
+				return
+			}
+			out := SuitesMeta{
+				Metadata: meta,
+				Path:     path.String(),
+			}
+			s, err := readSuites(ctx, opener, *path)
+			if err != nil {
+				select {
+				case <-ctx.Done():
+				case ec <- fmt.Errorf("read %w", Error{*path, err}):
+				}
+				return
+			}
+			out.Suites = *s
+			select {
+			case <-ctx.Done():
+				return
+			case suites <- out:
+			}
+
+			select {
+			case <-ctx.Done():
+			case ec <- nil:
+			}
+		}(art, meta)
+	}
+
+	for ; work > 0; work-- {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timeout: %w", ctx.Err())
+		case err := <-ec:
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/real_gcs.go b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/real_gcs.go
new file mode 100644
index 00000000000..cb541449cfc
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/real_gcs.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2021 The TestGrid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gcs
+
+import (
+	"context"
+	"io"
+	"strings"
+
+	"cloud.google.com/go/storage"
+)
+
+var (
+	_ Client = &realGCSClient{} // Ensure this implements interface
+)
+
+// NewGCSClient returns a ConditionalClient backed by the given storage.Client.
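+// Read conditions apply when opening, statting, or copying from an object;
+// write conditions apply when uploading or copying to one.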
+func NewGCSClient(client *storage.Client) ConditionalClient { + return realGCSClient{client, nil, nil} +} + +type realGCSClient struct { + client *storage.Client + readCond *storage.Conditions + writeCond *storage.Conditions +} + +func (rgc realGCSClient) If(read, write *storage.Conditions) ConditionalClient { + return realGCSClient{ + client: rgc.client, + readCond: read, + writeCond: write, + } +} + +func (rgc realGCSClient) handle(path Path, cond *storage.Conditions) *storage.ObjectHandle { + oh := rgc.client.Bucket(path.Bucket()).Object(path.Object()) + if cond == nil { + return oh + } + return oh.If(*cond) +} + +func (rgc realGCSClient) Copy(ctx context.Context, from, to Path) error { + fromH := rgc.handle(from, rgc.readCond) + _, err := rgc.handle(to, rgc.writeCond).CopierFrom(fromH).Run(ctx) + return err +} + +func (rgc realGCSClient) Open(ctx context.Context, path Path) (io.ReadCloser, error) { + r, err := rgc.handle(path, rgc.readCond).NewReader(ctx) + return r, err +} + +func (rgc realGCSClient) Objects(ctx context.Context, path Path, delimiter, startOffset string) Iterator { + p := path.Object() + if !strings.HasSuffix(p, "/") { + p += "/" + } + return rgc.client.Bucket(path.Bucket()).Objects(ctx, &storage.Query{ + Delimiter: delimiter, + Prefix: p, + StartOffset: startOffset, + }) +} + +func (rgc realGCSClient) Upload(ctx context.Context, path Path, buf []byte, worldReadable bool, cacheControl string) error { + return UploadHandle(ctx, rgc.handle(path, rgc.writeCond), buf, worldReadable, cacheControl) +} + +func (rgc realGCSClient) Stat(ctx context.Context, path Path) (*storage.ObjectAttrs, error) { + return rgc.handle(path, rgc.readCond).Attrs(ctx) +} diff --git a/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/sort.go b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/sort.go new file mode 100644 index 00000000000..beb62f75d44 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/testgrid/util/gcs/sort.go @@ -0,0 +1,112 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcs + +import ( + "context" + "sort" + "sync" + "time" + + "cloud.google.com/go/storage" + "github.com/fvbommel/sortorder" + "github.com/sirupsen/logrus" +) + +// LeastRecentlyUpdated sorts paths by their update timestamp, noting generations and any errors. 
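+// The returned map records the object generation for each path: 0 when the
+// object does not exist and -1 when the stat failed.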
+func LeastRecentlyUpdated(ctx context.Context, log logrus.FieldLogger, client Stater, paths []Path) map[Path]int64 { + ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + log.Debug("Sorting groups") + updated := make(map[Path]time.Time, len(paths)) + generations := make(map[Path]int64, len(paths)) + var wg sync.WaitGroup + var lock sync.Mutex + for _, apath := range paths { + wg.Add(1) + path := apath + go func() { + defer wg.Done() + attrs, err := client.Stat(ctx, path) + lock.Lock() + defer lock.Unlock() + switch { + case err == storage.ErrObjectNotExist: + generations[path] = 0 + case err != nil: + log.WithError(err).WithField("path", path).Warning("Stat failed") + generations[path] = -1 + default: + updated[path] = attrs.Updated + generations[path] = attrs.Generation + } + }() + } + wg.Wait() + + sort.SliceStable(paths, func(i, j int) bool { + return !updated[paths[i]].After(updated[paths[j]]) + }) + + if n := len(paths) - 1; n > 0 { + p0 := paths[0] + pn := paths[n] + log.WithFields(logrus.Fields{ + "newest-path": pn, + "newest": updated[pn], + "oldest-path": p0, + "oldest": updated[p0], + }).Info("Sorted") + } + + return generations +} + +// Touch attempts to win an update of the object. +// +// Cloud copies the current object to itself when the object already exists. +// Otherwise uploads genZero bytes. +func Touch(ctx context.Context, client ConditionalClient, path Path, generation int64, genZero []byte) error { + var cond storage.Conditions + if generation != 0 { + // Attempt to cloud-copy the object to its current location + // - only 1 will win in a concurrent situation + // - Increases the last update time. + cond.GenerationMatch = generation + return client.If(&cond, &cond).Copy(ctx, path, path) + } + + // New group, upload the bytes for this situation. + cond.DoesNotExist = true + return client.If(&cond, &cond).Upload(ctx, path, genZero, DefaultACL, "no-cache") +} + +// Sort the builds by monotonically decreasing original prefix base name. +// +// In other words, +// gs://b/1 +// gs://a/5 +// gs://c/10 +// becomes: +// gs://c/10 +// gs://a/5 +// gs://b/1 +func Sort(builds []Build) { + sort.SliceStable(builds, func(i, j int) bool { // greater + return !sortorder.NaturalLess(builds[i].baseName, builds[j].baseName) && builds[i].baseName != builds[j].baseName + }) +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/.gitignore b/vendor/github.com/andygrunwald/go-gerrit/.gitignore new file mode 100644 index 00000000000..4c7d39ea98f --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/.gitignore @@ -0,0 +1 @@ +/coverage.txt \ No newline at end of file diff --git a/vendor/github.com/andygrunwald/go-gerrit/CHANGELOG.md b/vendor/github.com/andygrunwald/go-gerrit/CHANGELOG.md new file mode 100644 index 00000000000..2052a6c526d --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/CHANGELOG.md @@ -0,0 +1,105 @@ +# Changelog + +This is a high level log of changes, bugfixes, enhancements, etc +that have taken place between releases. Later versions are shown +first. For more complete details see +[the releases on GitHub.](https://github.com/andygrunwald/go-gerrit/releases) + +## Versions + +### Latest + +### 0.5.2 + +* Fix panic in checkAuth() if Gerrit is down #42 +* Implement ListVotes(), DeleteVotes() and add missing tests + +### 0.5.1 + +* Added the `AbandonChange`, `RebaseChange`, `RestoreChange` and + `RevertChange` functions. + +### 0.5.0 + +**WARNING**: This release includes breaking changes. 
+
+* [BREAKING CHANGE] The SetReview function was returning the wrong
+  entity type. (#40)
+
+### 0.4.0
+
+**WARNING**: This release includes breaking changes.
+
+* [BREAKING CHANGE] - Added gometalinter to the build and fixed problems
+  discovered by the linters.
+  * Comment and error string fixes.
+  * Numerous lint and styling fixes.
+  * Ensured error values are being properly checked where appropriate.
+  * Addition of missing documentation.
+  * Removed filePath parameter from DeleteChangeEdit which was unused and
+    unnecessary for the request.
+  * Fixed CherryPickRevision and IncludeGroups functions which didn't pass
+    along the provided input structs into the request.
+* Go 1.5 has been removed from testing on Travis. The linters introduced in
+  0.4.0 do not support this version; Go 1.5 is lacking security updates and
+  most Linux distros have moved beyond Go 1.5 now.
+* Add Go 1.9 to the Travis matrix.
+* Fixed an issue where urls containing certain characters in the credentials
+  could cause NewClient() to use an invalid url. For example, something like
+  `/`, which Gerrit could use for generated passwords, would break
+  url.Parse's expectations.
+
+### 0.3.0
+
+**WARNING**: This release includes breaking changes.
+
+* [BREAKING CHANGE] Fix Changes.PublishDraftChange to accept a notify parameter.
+* [BREAKING CHANGE] Fix PublishChangeEdit to accept a notify parameter.
+* [BREAKING CHANGE] Fix ChangeFileContentInChangeEdit to allow the file content
+  to be included in the request.
+* Fix the url being used by CreateChange.
+* Fix type serialization of EventInfo.PatchSet.Number so it's consistent.
+* Fix Changes.AddReviewer so it passes along the reviewer to the request.
+* Simplify and optimize RemoveMagicPrefixLine.
+
+### 0.2.0
+
+**WARNING**: This release includes breaking changes.
+
+* [BREAKING CHANGE] Several bugfixes to GetEvents:
+  * Update EventInfo to handle the changeKey field and apply
+    the proper type for the Project field.
+  * Provide a means to ignore marshaling errors.
+  * Update GetEvents() to return the failed lines and remove
+    the pointer to the return value because it's unnecessary.
+* [BREAKING CHANGE] In ec28f77 `ChangeInfo.Labels` has been changed to a map
+  to fix #21.
+
+### 0.1.1
+
+* Minor fix to SubmitChange to use the `http.StatusConflict` constant
+  instead of a hard coded value when comparing response codes.
+* Updated AccountInfo.AccountID to be omitted if empty (such as when
+  used in ApprovalInfo).
+* + and : in url parameters for queries are no longer escaped. This was
+  causing `400 Bad Request` to be returned when the + symbol was
+  included as part of the query. To match behavior with Gerrit's search
+  handling, the : symbol was also excluded.
+* Fixed documentation for NewClient and moved the fmt.Errorf call from
+  inside the function to an `ErrNoInstanceGiven` variable so it's
+  easier to compare against.
+* Updated internal function digestAuthHeader to return exported errors
+  (ErrWWWAuthenticateHeader*) rather than calling fmt.Errorf. This makes
+  it easier to test against externally and also fixes a lint issue.
+* Updated NewClient function to handle credentials in the url.
+* Added the missing `Submitted` field to `ChangeInfo`.
+* Added the missing `URL` field to `ChangeInfo`, which is usually included
+  as part of an event from the events-log plugin.
+
+### 0.1.0
+
+* The first official release.
+* Implemented digest auth and several fixes for it.
+* Ensured Content-Type is included in all requests +* Fixed several internal bugs as well as a few documentation issues diff --git a/vendor/github.com/andygrunwald/go-gerrit/LICENSE b/vendor/github.com/andygrunwald/go-gerrit/LICENSE new file mode 100644 index 00000000000..692f6bea285 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andy Grunwald + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/andygrunwald/go-gerrit/Makefile b/vendor/github.com/andygrunwald/go-gerrit/Makefile new file mode 100644 index 00000000000..e77bc13b9d5 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/Makefile @@ -0,0 +1,18 @@ +.DEFAULT_GOAL := help + +.PHONY: help +help: ## Outputs the help + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.PHONY: test +test: ## Runs all unit tests + go test -v -race ./... + +.PHONY: vet +vet: ## Runs go vet + go vet ./... + +.PHONY: staticcheck +staticcheck: ## Runs static code analyzer staticcheck + go get -u honnef.co/go/tools/cmd/staticcheck + staticcheck ./... \ No newline at end of file diff --git a/vendor/github.com/andygrunwald/go-gerrit/README.md b/vendor/github.com/andygrunwald/go-gerrit/README.md new file mode 100644 index 00000000000..9b56e882ca3 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/README.md @@ -0,0 +1,266 @@ +# go-gerrit + +[![GoDoc](https://godoc.org/github.com/andygrunwald/go-gerrit?status.svg)](https://pkg.go.dev/github.com/andygrunwald/go-gerrit) +[![Go Report Card](https://goreportcard.com/badge/github.com/andygrunwald/go-gerrit)](https://goreportcard.com/report/github.com/andygrunwald/go-gerrit) + +go-gerrit is a [Go](https://golang.org/) client library for accessing the [Gerrit Code Review](https://www.gerritcodereview.com/) API. 
+
+![go-gerrit - Go client/library for Gerrit Code Review](./img/logo.png "go-gerrit - Go client/library for Gerrit Code Review")
+
+## Features
+
+* [Authentication](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#AuthenticationService) (HTTP Basic, HTTP Digest, HTTP Cookie)
+* Every API endpoint that Gerrit offers:
+  * [/access/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#AccessService)
+  * [/accounts/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#AccountsService)
+  * [/changes/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#ChangesService)
+  * [/config/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#ConfigService)
+  * [/groups/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#GroupsService)
+  * [/plugins/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#PluginsService)
+  * [/projects/](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#ProjectsService)
+* Supports optional plugin APIs, such as
+  * events-log - [About](https://gerrit.googlesource.com/plugins/events-log/+/master/src/main/resources/Documentation/about.md), [REST API](https://gerrit.googlesource.com/plugins/events-log/+/master/src/main/resources/Documentation/rest-api-events.md)
+
+## Installation
+
+go-gerrit follows the [Go Release Policy](https://golang.org/doc/devel/release.html#policy).
+This means we support the current and the previous Go release.
+
+It is go gettable ...
+
+```sh
+$ go get github.com/andygrunwald/go-gerrit
+```
+
+... and (optionally) you can run checks and tests:
+
+**Tests Only**
+
+```sh
+$ make test
+```
+
+**Checks, Tests, Linters, etc.**
+
+```sh
+$ make vet staticcheck
+```
+
+## API / Usage
+
+Please have a look at the [GoDoc documentation](https://pkg.go.dev/github.com/andygrunwald/go-gerrit) for a detailed API description.
+
+The [Gerrit Code Review - REST API](https://gerrit-review.googlesource.com/Documentation/rest-api.html) was used as the base document.
+
+### Authentication
+
+Gerrit supports multiple ways of [authentication](https://gerrit-review.googlesource.com/Documentation/rest-api.html#authentication).
+
+#### HTTP Basic
+
+Some Gerrit instances (like [TYPO3](https://review.typo3.org/)) have [auth.gitBasicAuth](https://gerrit-review.googlesource.com/Documentation/config-gerrit.html#auth.gitBasicAuth) activated.
+With this you can authenticate with HTTP Basic like this:
+
+```go
+instance := "https://review.typo3.org/"
+client, _ := gerrit.NewClient(instance, nil)
+client.Authentication.SetBasicAuth("andy.grunwald", "my secret password")
+
+self, _, _ := client.Accounts.GetAccount("self")
+
+fmt.Printf("Username: %s", self.Name)
+
+// Username: Andy Grunwald
+```
+
+If you get a `401 Unauthorized`, check your Account Settings and have a look at the `HTTP Password` configuration.
+
+#### HTTP Digest
+
+Some Gerrit instances (like [Wikimedia](https://gerrit.wikimedia.org/)) have [Digest access authentication](https://en.wikipedia.org/wiki/Digest_access_authentication) activated.
+
+```go
+instance := "https://gerrit.wikimedia.org/r/"
+client, _ := gerrit.NewClient(instance, nil)
+client.Authentication.SetDigestAuth("andy.grunwald", "my secret http password")
+
+self, _, _ := client.Accounts.GetAccount("self")
+
+fmt.Printf("Username: %s", self.Name)
+
+// Username: Andy Grunwald
+```
+
+If digest auth is not supported by the chosen Gerrit instance, an error like `WWW-Authenticate header type is not Digest` is returned.
+
+If you get a `401 Unauthorized`, check your Account Settings and have a look at the `HTTP Password` configuration.
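+
+As an alternative to calling a setter explicitly, `NewClient` also handles
+credentials embedded in the instance URL (see the changelog). A minimal,
+hypothetical sketch (the username and password are placeholders):
+
+```go
+instance := "https://andy.grunwald:my-http-password@review.typo3.org/"
+client, err := gerrit.NewClient(instance, nil)
+if err != nil {
+	panic(err)
+}
+
+self, _, _ := client.Accounts.GetAccount("self")
+fmt.Printf("Username: %s", self.Name)
+```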
+
+#### HTTP Cookie
+
+Some Gerrit instances, like those hosted on googlesource.com (e.g. [Go](https://go-review.googlesource.com/), [Android](https://android-review.googlesource.com/) or [Gerrit](https://gerrit-review.googlesource.com/)), support HTTP Cookie authentication.
+
+You need the cookie name and the cookie value.
+You can get them by clicking on "Settings > HTTP Password > Obtain Password" in your Gerrit instance.
+
+The cookie name will usually be `o` (if hosted on googlesource.com).
+Your cookie secret will be something like `git-your@email.com=SomeHash...`.
+
+```go
+instance := "https://gerrit-review.googlesource.com/"
+client, _ := gerrit.NewClient(instance, nil)
+client.Authentication.SetCookieAuth("o", "my-cookie-secret")
+
+self, _, _ := client.Accounts.GetAccount("self")
+
+fmt.Printf("Username: %s", self.Name)
+
+// Username: Andy G.
+```
+
+### More more more
+
+In the examples chapter below you will find a few more examples.
+If you are missing one or have a question about how to do something, please [open a new issue](https://github.com/andygrunwald/go-gerrit/issues/new) with your question.
+We will be happy to answer it.
+
+## Examples
+
+Below are a few examples of how the API can be used.
+A few more examples are available in the [GoDoc examples section](https://pkg.go.dev/github.com/andygrunwald/go-gerrit#pkg-examples).
+
+### Get version of Gerrit instance
+
+Receive the version of the [Gerrit instance used by the Gerrit team](https://gerrit-review.googlesource.com/) for development:
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-gerrit"
+)
+
+func main() {
+	instance := "https://gerrit-review.googlesource.com/"
+	client, err := gerrit.NewClient(instance, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	v, _, err := client.Config.GetVersion()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Version: %s", v)
+
+	// Version: 2.12.2-2512-g0b1bccd
+}
+```
+
+### Get all public projects
+
+List all projects from [Chromium](https://chromium-review.googlesource.com/):
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-gerrit"
+)
+
+func main() {
+	instance := "https://chromium-review.googlesource.com/"
+	client, err := gerrit.NewClient(instance, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	opt := &gerrit.ProjectOptions{
+		Description: true,
+	}
+	projects, _, err := client.Projects.ListProjects(opt)
+	if err != nil {
+		panic(err)
+	}
+
+	for name, p := range *projects {
+		fmt.Printf("%s - State: %s\n", name, p.State)
+	}
+
+	// chromiumos/platform/depthcharge - State: ACTIVE
+	// external/github.com/maruel/subcommands - State: ACTIVE
+	// external/junit - State: ACTIVE
+	// ...
+}
+```
+
+### Query changes
+
+Get some changes of the [kernel/common project](https://android-review.googlesource.com/#/q/project:kernel/common) from the [Android](http://source.android.com/) [Gerrit Review System](https://android-review.googlesource.com/).
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-gerrit"
+)
+
+func main() {
+	instance := "https://android-review.googlesource.com/"
+	client, err := gerrit.NewClient(instance, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	opt := &gerrit.QueryChangeOptions{}
+	opt.Query = []string{"project:kernel/common"}
+	opt.AdditionalFields = []string{"LABELS"}
+	changes, _, err := client.Changes.QueryChanges(opt)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, change := range *changes {
+		fmt.Printf("Project: %s -> %s -> %s%d\n", change.Project, change.Subject, instance, change.Number)
+	}
+
+	// Project: kernel/common -> android: binder: Fix BR_ERROR usage and change LSM denials to use it. -> https://android-review.googlesource.com/150839
+	// Project: kernel/common -> android: binder: fix duplicate error return. -> https://android-review.googlesource.com/155031
+	// Project: kernel/common -> dm-verity: Add modes and emit uevent on corrupted blocks -> https://android-review.googlesource.com/169572
+	// ...
+}
+```
+
+## FAQ
+
+### How is the source code organized?
+
+The source code organisation was inspired by [go-github by Google](https://github.com/google/go-github).
+
+Every REST API endpoint (e.g. [/access/](https://gerrit-review.googlesource.com/Documentation/rest-api-access.html) or [/changes/](https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html)) is bundled into its own service (e.g. [AccessService in access.go](./access.go) or [ChangesService in changes.go](./changes.go)).
+Every service is part of [gerrit.Client](./gerrit.go) as a member variable.
+
+gerrit.Client provides basic helper functions to avoid unnecessary code duplication, such as building a new request, parsing responses and so on.
+
+Based on this structure, implementing new API functionality is straightforward. Here is an example of *ChangesService.DeleteTopic* / [DELETE /changes/{change-id}/topic](https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-topic):
+
+```go
+func (s *ChangesService) DeleteTopic(changeID string) (*Response, error) {
+	u := fmt.Sprintf("changes/%s/topic", changeID)
+	return s.client.DeleteRequest(u, nil)
+}
+```
+
+### What about the version compatibility with Gerrit?
+
+The library was implemented based on the REST API of Gerrit version 2.11.3-1230-gb8336f1 and tested against this version.
+
+The library may work with older versions as well.
+If you notice an incompatibility, [open a new issue](https://github.com/andygrunwald/go-gerrit/issues/new) or try to fix it.
+We welcome contributions!
+
+
+### What about adding code to support the REST API of an optional plugin?
+
+It depends on the plugin; you are welcome to [open a new issue](https://github.com/andygrunwald/go-gerrit/issues/new) first to propose the idea if you wish.
+As an example, support for the events-log plugin was added because the plugin itself is fairly
+popular and the structures its REST API uses could also be used by `gerrit stream-events`.
+
+
+## License
+
+This project is released under the terms of the [MIT license](http://en.wikipedia.org/wiki/MIT_License).
diff --git a/vendor/github.com/andygrunwald/go-gerrit/access.go b/vendor/github.com/andygrunwald/go-gerrit/access.go
new file mode 100644
index 00000000000..b38bbc2a1f8
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/access.go
@@ -0,0 +1,77 @@
+package gerrit
+
+// AccessService contains Access Right related REST endpoints
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-access.html
+type AccessService struct {
+	client *Client
+}
+
+// AccessSectionInfo describes the access rights that are assigned on a ref.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-access.html#access-section-info
+type AccessSectionInfo struct {
+	Permissions map[string]PermissionInfo `json:"permissions"`
+}
+
+// PermissionInfo entity contains information about an assigned permission.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-access.html#permission-info
+type PermissionInfo struct {
+	Label     string                        `json:"label,omitempty"`
+	Exclusive bool                          `json:"exclusive"`
+	Rules     map[string]PermissionRuleInfo `json:"rules"`
+}
+
+// PermissionRuleInfo entity contains information about a permission rule that is assigned to a group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-access.html#permission-rule-info
+type PermissionRuleInfo struct {
+	// TODO Possible values for action: ALLOW, DENY or BLOCK, INTERACTIVE and BATCH
+	Action string `json:"action"`
+	Force  bool   `json:"force"`
+	Min    int    `json:"min"`
+	Max    int    `json:"max"`
+}
+
+// ProjectAccessInfo entity contains information about the access rights for a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-access.html#project-access-info
+type ProjectAccessInfo struct {
+	Revision       string                       `json:"revision"`
+	InheritsFrom   ProjectInfo                  `json:"inherits_from"`
+	Local          map[string]AccessSectionInfo `json:"local"`
+	IsOwner        bool                         `json:"is_owner"`
+	OwnerOf        []string                     `json:"owner_of"`
+	CanUpload      bool                         `json:"can_upload"`
+	CanAdd         bool                         `json:"can_add"`
+	CanAddTags     bool                         `json:"can_add_tags"`
+	ConfigVisible  bool                         `json:"config_visible"`
+	Groups         map[string]GroupInfo         `json:"groups"`
+	ConfigWebLinks []string                     `json:"configWebLinks"`
+}
+
+// ListAccessRightsOptions specifies the parameters to the AccessService.ListAccessRights.
+type ListAccessRightsOptions struct {
+	// The projects for which the access rights should be returned must be specified as project options.
+	// The project can be specified multiple times.
+	Project []string `url:"project,omitempty"`
+}
+
+// ListAccessRights lists the access rights for projects.
+// As a result, a map is returned that maps the project name to ProjectAccessInfo entities.
+// The entries in the map are sorted by project name.
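+//
+// A hypothetical usage sketch (assuming the service is exposed as
+// client.Access, mirroring the other services on gerrit.Client; "MyProject"
+// is a placeholder project name):
+//
+//	opt := &ListAccessRightsOptions{Project: []string{"MyProject"}}
+//	rights, _, err := client.Access.ListAccessRights(opt)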
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-access.html#list-access +func (s *AccessService) ListAccessRights(opt *ListAccessRightsOptions) (*map[string]ProjectAccessInfo, *Response, error) { + u := "access/" + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + v := new(map[string]ProjectAccessInfo) + resp, err := s.client.Call("GET", u, nil, v) + return v, resp, err +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/accounts.go b/vendor/github.com/andygrunwald/go-gerrit/accounts.go new file mode 100644 index 00000000000..3f828635c2f --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/accounts.go @@ -0,0 +1,874 @@ +package gerrit + +import ( + "fmt" +) + +// AccountsService contains Account related REST endpoints +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html +type AccountsService struct { + client *Client +} + +// AccountInfo entity contains information about an account. +type AccountInfo struct { + AccountID int `json:"_account_id,omitempty"` + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` + Username string `json:"username,omitempty"` + + // Avatars lists avatars of various sizes for the account. + // This field is only populated if the avatars plugin is enabled. + Avatars []struct { + URL string `json:"url,omitempty"` + Height int `json:"height,omitempty"` + } `json:"avatars,omitempty"` +} + +// SSHKeyInfo entity contains information about an SSH key of a user. +type SSHKeyInfo struct { + Seq int `json:"seq"` + SSHPublicKey string `json:"ssh_public_key"` + EncodedKey string `json:"encoded_key"` + Algorithm string `json:"algorithm"` + Comment string `json:"comment,omitempty"` + Valid bool `json:"valid"` +} + +// UsernameInput entity contains information for setting the username for an account. +type UsernameInput struct { + Username string `json:"username"` +} + +// QueryLimitInfo entity contains information about the Query Limit of a user. +type QueryLimitInfo struct { + Min int `json:"min"` + Max int `json:"max"` +} + +// HTTPPasswordInput entity contains information for setting/generating an HTTP password. +type HTTPPasswordInput struct { + Generate bool `json:"generate,omitempty"` + HTTPPassword string `json:"http_password,omitempty"` +} + +// GpgKeysInput entity contains information for adding/deleting GPG keys. +type GpgKeysInput struct { + Add []string `json:"add"` + Delete []string `json:"delete"` +} + +// GpgKeyInfo entity contains information about a GPG public key. +type GpgKeyInfo struct { + ID string `json:"id,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + UserIDs []string `json:"user_ids,omitempty"` + Key string `json:"key,omitempty"` +} + +// EmailInput entity contains information for registering a new email address. +type EmailInput struct { + Email string `json:"email"` + Preferred bool `json:"preferred,omitempty"` + NoConfirmation bool `json:"no_confirmation,omitempty"` +} + +// EmailInfo entity contains information about an email address of a user. +type EmailInfo struct { + Email string `json:"email"` + Preferred bool `json:"preferred,omitempty"` + PendingConfirmation bool `json:"pending_confirmation,omitempty"` +} + +// AccountInput entity contains information for the creation of a new account. 
+type AccountInput struct { + Username string `json:"username,omitempty"` + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` + SSHKey string `json:"ssh_key,omitempty"` + HTTPPassword string `json:"http_password,omitempty"` + Groups []string `json:"groups,omitempty"` +} + +// AccountDetailInfo entity contains detailed information about an account. +type AccountDetailInfo struct { + AccountInfo + RegisteredOn Timestamp `json:"registered_on"` +} + +// AccountNameInput entity contains information for setting a name for an account. +type AccountNameInput struct { + Name string `json:"name,omitempty"` +} + +// AccountCapabilityInfo entity contains information about the global capabilities of a user. +type AccountCapabilityInfo struct { + AccessDatabase bool `json:"accessDatabase,omitempty"` + AdministrateServer bool `json:"administrateServer,omitempty"` + CreateAccount bool `json:"createAccount,omitempty"` + CreateGroup bool `json:"createGroup,omitempty"` + CreateProject bool `json:"createProject,omitempty"` + EmailReviewers bool `json:"emailReviewers,omitempty"` + FlushCaches bool `json:"flushCaches,omitempty"` + KillTask bool `json:"killTask,omitempty"` + MaintainServer bool `json:"maintainServer,omitempty"` + Priority string `json:"priority,omitempty"` + QueryLimit QueryLimitInfo `json:"queryLimit"` + RunAs bool `json:"runAs,omitempty"` + RunGC bool `json:"runGC,omitempty"` + StreamEvents bool `json:"streamEvents,omitempty"` + ViewAllAccounts bool `json:"viewAllAccounts,omitempty"` + ViewCaches bool `json:"viewCaches,omitempty"` + ViewConnections bool `json:"viewConnections,omitempty"` + ViewPlugins bool `json:"viewPlugins,omitempty"` + ViewQueue bool `json:"viewQueue,omitempty"` +} + +// DiffPreferencesInfo entity contains information about the diff preferences of a user. +type DiffPreferencesInfo struct { + Context int `json:"context"` + Theme string `json:"theme"` + ExpandAllComments bool `json:"expand_all_comments,omitempty"` + IgnoreWhitespace string `json:"ignore_whitespace"` + IntralineDifference bool `json:"intraline_difference,omitempty"` + LineLength int `json:"line_length"` + ManualReview bool `json:"manual_review,omitempty"` + RetainHeader bool `json:"retain_header,omitempty"` + ShowLineEndings bool `json:"show_line_endings,omitempty"` + ShowTabs bool `json:"show_tabs,omitempty"` + ShowWhitespaceErrors bool `json:"show_whitespace_errors,omitempty"` + SkipDeleted bool `json:"skip_deleted,omitempty"` + SkipUncommented bool `json:"skip_uncommented,omitempty"` + SyntaxHighlighting bool `json:"syntax_highlighting,omitempty"` + HideTopMenu bool `json:"hide_top_menu,omitempty"` + AutoHideDiffTableHeader bool `json:"auto_hide_diff_table_header,omitempty"` + HideLineNumbers bool `json:"hide_line_numbers,omitempty"` + TabSize int `json:"tab_size"` + HideEmptyPane bool `json:"hide_empty_pane,omitempty"` +} + +// DiffPreferencesInput entity contains information for setting the diff preferences of a user. +// Fields which are not set will not be updated. 
+type DiffPreferencesInput struct { + Context int `json:"context,omitempty"` + ExpandAllComments bool `json:"expand_all_comments,omitempty"` + IgnoreWhitespace string `json:"ignore_whitespace,omitempty"` + IntralineDifference bool `json:"intraline_difference,omitempty"` + LineLength int `json:"line_length,omitempty"` + ManualReview bool `json:"manual_review,omitempty"` + RetainHeader bool `json:"retain_header,omitempty"` + ShowLineEndings bool `json:"show_line_endings,omitempty"` + ShowTabs bool `json:"show_tabs,omitempty"` + ShowWhitespaceErrors bool `json:"show_whitespace_errors,omitempty"` + SkipDeleted bool `json:"skip_deleted,omitempty"` + SkipUncommented bool `json:"skip_uncommented,omitempty"` + SyntaxHighlighting bool `json:"syntax_highlighting,omitempty"` + HideTopMenu bool `json:"hide_top_menu,omitempty"` + AutoHideDiffTableHeader bool `json:"auto_hide_diff_table_header,omitempty"` + HideLineNumbers bool `json:"hide_line_numbers,omitempty"` + TabSize int `json:"tab_size,omitempty"` +} + +// PreferencesInfo entity contains information about a user’s preferences. +type PreferencesInfo struct { + ChangesPerPage int `json:"changes_per_page"` + ShowSiteHeader bool `json:"show_site_header,omitempty"` + UseFlashClipboard bool `json:"use_flash_clipboard,omitempty"` + DownloadScheme string `json:"download_scheme"` + DownloadCommand string `json:"download_command"` + CopySelfOnEmail bool `json:"copy_self_on_email,omitempty"` + DateFormat string `json:"date_format"` + TimeFormat string `json:"time_format"` + RelativeDateInChangeTable bool `json:"relative_date_in_change_table,omitempty"` + SizeBarInChangeTable bool `json:"size_bar_in_change_table,omitempty"` + LegacycidInChangeTable bool `json:"legacycid_in_change_table,omitempty"` + MuteCommonPathPrefixes bool `json:"mute_common_path_prefixes,omitempty"` + ReviewCategoryStrategy string `json:"review_category_strategy"` + DiffView string `json:"diff_view"` + My []TopMenuItemInfo `json:"my"` + URLAliases string `json:"url_aliases,omitempty"` +} + +// PreferencesInput entity contains information for setting the user preferences. +// Fields which are not set will not be updated. +type PreferencesInput struct { + ChangesPerPage int `json:"changes_per_page,omitempty"` + ShowSiteHeader bool `json:"show_site_header,omitempty"` + UseFlashClipboard bool `json:"use_flash_clipboard,omitempty"` + DownloadScheme string `json:"download_scheme,omitempty"` + DownloadCommand string `json:"download_command,omitempty"` + CopySelfOnEmail bool `json:"copy_self_on_email,omitempty"` + DateFormat string `json:"date_format,omitempty"` + TimeFormat string `json:"time_format,omitempty"` + RelativeDateInChangeTable bool `json:"relative_date_in_change_table,omitempty"` + SizeBarInChangeTable bool `json:"size_bar_in_change_table,omitempty"` + LegacycidInChangeTable bool `json:"legacycid_in_change_table,omitempty"` + MuteCommonPathPrefixes bool `json:"mute_common_path_prefixes,omitempty"` + ReviewCategoryStrategy string `json:"review_category_strategy,omitempty"` + DiffView string `json:"diff_view,omitempty"` + My []TopMenuItemInfo `json:"my,omitempty"` + URLAliases string `json:"url_aliases,omitempty"` +} + +// CapabilityOptions specifies the parameters to filter for capabilities. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#list-account-capabilities +type CapabilityOptions struct { + // To filter the set of global capabilities the q parameter can be used. 
+ // Filtering may decrease the response time by avoiding looking at every possible alternative for the caller. + Filter []string `url:"q,omitempty"` +} + +// GetAccount returns an account as an AccountInfo entity. +// If account is "self" the current authenticated account will be returned. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-account +func (s *AccountsService) GetAccount(account string) (*AccountInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s", account) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(AccountInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetAccountDetails retrieves the details of an account. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-detail +func (s *AccountsService) GetAccountDetails(accountID string) (*AccountDetailInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/detail", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(AccountDetailInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetAccountName retrieves the full name of an account. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-account-name +func (s *AccountsService) GetAccountName(accountID string) (string, *Response, error) { + u := fmt.Sprintf("accounts/%s/name", accountID) + return getStringResponseWithoutOptions(s.client, u) +} + +// GetUsername retrieves the username of an account. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-username +func (s *AccountsService) GetUsername(accountID string) (string, *Response, error) { + u := fmt.Sprintf("accounts/%s/username", accountID) + return getStringResponseWithoutOptions(s.client, u) +} + +// GetHTTPPassword retrieves the HTTP password of an account. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-http-password +func (s *AccountsService) GetHTTPPassword(accountID string) (string, *Response, error) { + u := fmt.Sprintf("accounts/%s/password.http", accountID) + return getStringResponseWithoutOptions(s.client, u) +} + +// ListAccountEmails returns the email addresses that are configured for the specified user. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#list-account-emails +func (s *AccountsService) ListAccountEmails(accountID string) (*[]EmailInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/emails", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]EmailInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetAccountEmail retrieves an email address of a user. 
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-account-email +func (s *AccountsService) GetAccountEmail(accountID, emailID string) (*EmailInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/emails/%s", accountID, emailID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(EmailInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListSSHKeys returns the SSH keys of an account. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#list-ssh-keys +func (s *AccountsService) ListSSHKeys(accountID string) (*[]SSHKeyInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/sshkeys", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]SSHKeyInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetSSHKey retrieves an SSH key of a user. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-ssh-key +func (s *AccountsService) GetSSHKey(accountID, sshKeyID string) (*SSHKeyInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/sshkeys/%s", accountID, sshKeyID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(SSHKeyInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListGPGKeys returns the GPG keys of an account. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#list-gpg-keys +func (s *AccountsService) ListGPGKeys(accountID string) (*map[string]GpgKeyInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/gpgkeys", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(map[string]GpgKeyInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetGPGKey retrieves a GPG key of a user. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-gpg-key +func (s *AccountsService) GetGPGKey(accountID, gpgKeyID string) (*GpgKeyInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/gpgkeys/%s", accountID, gpgKeyID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(GpgKeyInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListAccountCapabilities returns the global capabilities that are enabled for the specified user. +// If the global capabilities for the calling user should be listed, self can be used as account-id. +// This can be used by UI tools to discover if administrative features are available to the caller, so they can hide (or show) relevant UI actions. 
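+//
+// A hypothetical usage sketch (a nil CapabilityOptions lists all capabilities):
+//
+//	caps, _, err := client.Accounts.ListAccountCapabilities("self", nil)
+//	if err == nil && caps.CreateProject {
+//		// the caller may create new projects
+//	}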
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#list-account-capabilities +func (s *AccountsService) ListAccountCapabilities(accountID string, opt *CapabilityOptions) (*AccountCapabilityInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/capabilities", accountID) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(AccountCapabilityInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListGroups lists all groups that contain the specified user as a member. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#list-groups +func (s *AccountsService) ListGroups(accountID string) (*[]GroupInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/groups", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]GroupInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetUserPreferences retrieves the user’s preferences. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-user-preferences +func (s *AccountsService) GetUserPreferences(accountID string) (*PreferencesInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/preferences", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(PreferencesInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetDiffPreferences retrieves the diff preferences of a user. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-diff-preferences +func (s *AccountsService) GetDiffPreferences(accountID string) (*DiffPreferencesInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/preferences.diff", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(DiffPreferencesInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetStarredChanges gets the changes starred by the identified user account. +// This URL endpoint is functionally identical to the changes query GET /changes/?q=is:starred. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-starred-changes +func (s *AccountsService) GetStarredChanges(accountID string) (*[]ChangeInfo, *Response, error) { + u := fmt.Sprintf("accounts/%s/starred.changes", accountID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]ChangeInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// SuggestAccount suggests users for a given query q and result limit n. +// If result limit is not passed, then the default 10 is used. +// Returns a list of matching AccountInfo entities. 
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#query-account
+func (s *AccountsService) SuggestAccount(opt *QueryOptions) (*[]AccountInfo, *Response, error) {
+	u := "accounts/"
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]AccountInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CreateAccount creates a new account.
+// In the request body additional data for the account can be provided as AccountInput.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#create-account
+func (s *AccountsService) CreateAccount(username string, input *AccountInput) (*AccountInfo, *Response, error) {
+	u := fmt.Sprintf("accounts/%s", username)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(AccountInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetAccountName sets the full name of an account.
+// The new account name must be provided in the request body inside an AccountNameInput entity.
+//
+// As a response, the new account name is returned.
+// If the name was deleted the response is “204 No Content”.
+// Some realms may not allow modifying the account name.
+// In this case the request is rejected with “405 Method Not Allowed”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-account-name
+func (s *AccountsService) SetAccountName(accountID string, input *AccountNameInput) (*string, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/name", accountID)
+
+	// TODO: use getStringResponseWithoutOptions here (for PUT requests)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteAccountName deletes the name of an account.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#delete-account-name
+func (s *AccountsService) DeleteAccountName(accountID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/name", accountID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteActive sets the account state to inactive.
+// If the account was already inactive the response is “404 Not Found”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#delete-active
+func (s *AccountsService) DeleteActive(accountID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/active", accountID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteHTTPPassword deletes the HTTP password of an account.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#delete-http-password
+func (s *AccountsService) DeleteHTTPPassword(accountID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/password.http", accountID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteAccountEmail deletes an email address of an account.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#delete-account-email
+func (s *AccountsService) DeleteAccountEmail(accountID, emailID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/emails/%s", accountID, emailID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteSSHKey deletes an SSH key of a user.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#delete-ssh-key
+func (s *AccountsService) DeleteSSHKey(accountID, sshKeyID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/sshkeys/%s", accountID, sshKeyID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteGPGKey deletes a GPG key of a user.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#delete-gpg-key
+func (s *AccountsService) DeleteGPGKey(accountID, gpgKeyID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/gpgkeys/%s", accountID, gpgKeyID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// SetUsername sets a new username.
+// The new username must be provided in the request body inside a UsernameInput entity.
+// Once set, the username cannot be changed or deleted.
+// If attempted, this fails with “405 Method Not Allowed”.
+//
+// As a response, the new username is returned.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-username
+func (s *AccountsService) SetUsername(accountID string, input *UsernameInput) (*string, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/username", accountID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetActive checks if an account is active.
+//
+// If the account is active the string ok is returned.
+// If the account is inactive the response is “204 No Content”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-active
+func (s *AccountsService) GetActive(accountID string) (string, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/active", accountID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// SetActive sets the account state to active.
+//
+// If the account was already active the response is “200 OK”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-active
+func (s *AccountsService) SetActive(accountID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/active", accountID)
+
+	req, err := s.client.NewRequest("PUT", u, nil)
+	if err != nil {
+		return nil, err
+	}
+	return s.client.Do(req, nil)
+}
+
+// SetHTTPPassword sets or generates the HTTP password of an account.
+// The options for setting/generating the HTTP password must be provided in the request body inside an HTTPPasswordInput entity.
+//
+// As a response, the new HTTP password is returned.
+// If the HTTP password was deleted the response is “204 No Content”.
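+//
+// A hypothetical usage sketch, letting Gerrit generate a new password for the
+// calling user (HTTPPasswordInput is defined earlier in this file):
+//
+//	input := &HTTPPasswordInput{Generate: true}
+//	newPassword, _, err := client.Accounts.SetHTTPPassword("self", input)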
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-http-password
+func (s *AccountsService) SetHTTPPassword(accountID string, input *HTTPPasswordInput) (*string, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/password.http", accountID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CreateAccountEmail registers a new email address for the user.
+// A verification email is sent with a link that needs to be visited to confirm the email address, unless DEVELOPMENT_BECOME_ANY_ACCOUNT is used as the authentication type.
+// In development mode, email addresses are added directly without confirmation.
+// A Gerrit administrator may add an email address without confirmation by setting no_confirmation in the EmailInput.
+// In the request body additional data for the email address can be provided as EmailInput.
+//
+// As a response, the new email address is returned as an EmailInfo entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#create-account-email
+func (s *AccountsService) CreateAccountEmail(accountID, emailID string, input *EmailInput) (*EmailInfo, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/emails/%s", accountID, emailID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(EmailInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetPreferredEmail sets an email address as the preferred email address for an account.
+//
+// If the email address was already the preferred email address of the account the response is “200 OK”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-preferred-email
+func (s *AccountsService) SetPreferredEmail(accountID, emailID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/emails/%s/preferred", accountID, emailID)
+
+	req, err := s.client.NewRequest("PUT", u, nil)
+	if err != nil {
+		return nil, err
+	}
+	return s.client.Do(req, nil)
+}
+
+// GetAvatarChangeURL retrieves the URL where the user can change the avatar image.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-avatar-change-url
+func (s *AccountsService) GetAvatarChangeURL(accountID string) (string, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/avatar.change.url", accountID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// AddGPGKeys adds or deletes one or more GPG keys for a user.
+// The changes must be provided in the request body as a GpgKeysInput entity.
+// Each new GPG key is provided in ASCII armored format, and must contain a self-signed certification matching a registered email or other identity of the user.
+//
+// As a response, the modified GPG keys are returned as a map of GpgKeyInfo entities, keyed by ID. Deleted keys are represented by an empty object.
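+//
+// A hypothetical usage sketch (armoredKey stands in for an ASCII armored
+// public key string; GpgKeysInput is defined earlier in this file):
+//
+//	input := &GpgKeysInput{Add: []string{armoredKey}}
+//	keys, _, err := client.Accounts.AddGPGKeys("self", input)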
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#add-delete-gpg-keys
+func (s *AccountsService) AddGPGKeys(accountID string, input *GpgKeysInput) (*map[string]GpgKeyInfo, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/gpgkeys", accountID)
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string]GpgKeyInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CheckAccountCapability checks if a user has a certain global capability.
+//
+// If the user has the global capability the string ok is returned.
+// If the user doesn’t have the global capability the response is “404 Not Found”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#check-account-capability
+func (s *AccountsService) CheckAccountCapability(accountID, capabilityID string) (string, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/capabilities/%s", accountID, capabilityID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// SetUserPreferences sets the user’s preferences.
+// The new preferences must be provided in the request body as a PreferencesInput entity.
+//
+// As a result, the new preferences of the user are returned as a PreferencesInfo entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-user-preferences
+func (s *AccountsService) SetUserPreferences(accountID string, input *PreferencesInput) (*PreferencesInfo, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/preferences", accountID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(PreferencesInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetDiffPreferences sets the diff preferences of a user.
+// The new diff preferences must be provided in the request body as a DiffPreferencesInput entity.
+//
+// As a result, the new diff preferences of the user are returned as a DiffPreferencesInfo entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#set-diff-preferences
+func (s *AccountsService) SetDiffPreferences(accountID string, input *DiffPreferencesInput) (*DiffPreferencesInfo, *Response, error) {
+	u := fmt.Sprintf("accounts/%s/preferences.diff", accountID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(DiffPreferencesInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// StarChange stars a change.
+// Starred changes are returned for the search query is:starred or starredby:USER and automatically notify the user whenever updates are made to the change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#star-change
+func (s *AccountsService) StarChange(accountID, changeID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/starred.changes/%s", accountID, changeID)
+
+	req, err := s.client.NewRequest("PUT", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// UnstarChange unstars a change.
+// Removes the starred flag, stopping notifications.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#unstar-change
+func (s *AccountsService) UnstarChange(accountID, changeID string) (*Response, error) {
+	u := fmt.Sprintf("accounts/%s/starred.changes/%s", accountID, changeID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+/*
+Missing Account Endpoints:
+	Add SSH Key
+	Get Avatar
+*/
diff --git a/vendor/github.com/andygrunwald/go-gerrit/authentication.go b/vendor/github.com/andygrunwald/go-gerrit/authentication.go
new file mode 100644
index 00000000000..193f62203b4
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/authentication.go
@@ -0,0 +1,187 @@
+package gerrit
+
+import (
+	"crypto/md5" // nolint: gosec
+	"crypto/rand"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+)
+
+var (
+	// ErrWWWAuthenticateHeaderMissing is returned by digestAuthHeader when the WWW-Authenticate header is missing
+	ErrWWWAuthenticateHeaderMissing = errors.New("WWW-Authenticate header is missing")
+
+	// ErrWWWAuthenticateHeaderInvalid is returned by digestAuthHeader when the WWW-Authenticate header is invalid
+	ErrWWWAuthenticateHeaderInvalid = errors.New("WWW-Authenticate header is invalid")
+
+	// ErrWWWAuthenticateHeaderNotDigest is returned by digestAuthHeader when the WWW-Authenticate header is not 'Digest'
+	ErrWWWAuthenticateHeaderNotDigest = errors.New("WWW-Authenticate header type is not Digest")
+)
+
+const (
+	// HTTP Basic Authentication
+	authTypeBasic = 1
+	// HTTP Digest Authentication
+	authTypeDigest = 2
+	// HTTP Cookie Authentication
+	authTypeCookie = 3
+)
+
+// AuthenticationService contains Authentication related functions.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api.html#authentication
+type AuthenticationService struct {
+	client *Client
+
+	// Storage for authentication
+	// Username or name of cookie
+	name string
+	// Password or value of cookie
+	secret   string
+	authType int
+}
+
+// SetBasicAuth sets basic parameters for HTTP Basic auth
+func (s *AuthenticationService) SetBasicAuth(username, password string) {
+	s.name = username
+	s.secret = password
+	s.authType = authTypeBasic
+}
+
+// SetDigestAuth sets digest parameters for HTTP Digest auth.
+func (s *AuthenticationService) SetDigestAuth(username, password string) {
+	s.name = username
+	s.secret = password
+	s.authType = authTypeDigest
+}
+
+// digestAuthHeader is called by gerrit.Client.Do in the event the server
+// returns 401 Unauthorized and authType was set to authTypeDigest. The
+// resulting string is used to set the Authorization header before retrying
+// the request.
+func (s *AuthenticationService) digestAuthHeader(response *http.Response) (string, error) {
+	authenticateHeader := response.Header.Get("WWW-Authenticate")
+	if authenticateHeader == "" {
+		return "", ErrWWWAuthenticateHeaderMissing
+	}
+
+	split := strings.SplitN(authenticateHeader, " ", 2)
+	if len(split) != 2 {
+		return "", ErrWWWAuthenticateHeaderInvalid
+	}
+
+	if split[0] != "Digest" {
+		return "", ErrWWWAuthenticateHeaderNotDigest
+	}
+
+	// Iterate over all the fields from the WWW-Authenticate header
+	// and create a map of keys and values.
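+	//
+	// For example, a WWW-Authenticate value typically looks like:
+	//
+	//	Digest realm="Gerrit Code Review", qop="auth", nonce="..."
+	//
+	// The fields collected here feed the RFC 2617 MD5 computation performed
+	// below (with a fixed nonce count of 00000001):
+	//
+	//	HA1      = MD5(username:realm:password)
+	//	HA2      = MD5(method:uri)
+	//	response = MD5(HA1:nonce:nc:cnonce:qop:HA2)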
+	authenticate := map[string]string{}
+	for _, value := range strings.Split(split[1], ",") {
+		kv := strings.SplitN(value, "=", 2)
+		if len(kv) != 2 {
+			continue
+		}
+
+		key := strings.Trim(strings.Trim(kv[0], " "), "\"")
+		value := strings.Trim(strings.Trim(kv[1], " "), "\"")
+		authenticate[key] = value
+	}
+
+	// Gerrit usually responds without providing the algorithm. According
+	// to RFC2617 if no algorithm is provided then the default is to use
+	// MD5. At the time this code was implemented Gerrit did not appear
+	// to support other algorithms or provide a means of changing the
+	// algorithm.
+	if value, ok := authenticate["algorithm"]; ok {
+		if value != "MD5" {
+			return "", fmt.Errorf(
+				"algorithm not implemented: %s", value)
+		}
+	}
+
+	realmHeader := authenticate["realm"]
+	qopHeader := authenticate["qop"]
+	nonceHeader := authenticate["nonce"]
+
+	// If the server does not inform us what the uri is supposed
+	// to be then use the last request's uri instead.
+	if _, ok := authenticate["uri"]; !ok {
+		authenticate["uri"] = response.Request.URL.Path
+	}
+
+	uriHeader := authenticate["uri"]
+
+	// A1
+	h := md5.New() // nolint: gosec
+	A1 := fmt.Sprintf("%s:%s:%s", s.name, realmHeader, s.secret)
+	if _, err := io.WriteString(h, A1); err != nil {
+		return "", err
+	}
+	HA1 := fmt.Sprintf("%x", h.Sum(nil))
+
+	// A2
+	h = md5.New() // nolint: gosec
+	A2 := fmt.Sprintf("%s:%s", response.Request.Method, uriHeader)
+	if _, err := io.WriteString(h, A2); err != nil {
+		return "", err
+	}
+	HA2 := fmt.Sprintf("%x", h.Sum(nil))
+
+	k := make([]byte, 12)
+	for bytes := 0; bytes < len(k); {
+		n, err := rand.Read(k[bytes:])
+		if err != nil {
+			return "", fmt.Errorf("cnonce generation failed: %s", err)
+		}
+		bytes += n
+	}
+	cnonce := base64.StdEncoding.EncodeToString(k)
+	digest := md5.New() // nolint: gosec
+	if _, err := digest.Write([]byte(strings.Join([]string{HA1, nonceHeader, "00000001", cnonce, qopHeader, HA2}, ":"))); err != nil {
+		return "", err
+	}
+	responseField := fmt.Sprintf("%x", digest.Sum(nil))
+
+	return fmt.Sprintf(
+		`Digest username="%s", realm="%s", nonce="%s", uri="%s", cnonce="%s", nc=00000001, qop=%s, response="%s"`,
+		s.name, realmHeader, nonceHeader, uriHeader, cnonce, qopHeader, responseField), nil
+}
+
+// SetCookieAuth sets basic parameters for HTTP Cookie auth
+func (s *AuthenticationService) SetCookieAuth(name, value string) {
+	s.name = name
+	s.secret = value
+	s.authType = authTypeCookie
+}
+
+// HasBasicAuth checks if the auth type is HTTP Basic auth
+func (s *AuthenticationService) HasBasicAuth() bool {
+	return s.authType == authTypeBasic
+}
+
+// HasDigestAuth checks if the auth type is HTTP Digest based
+func (s *AuthenticationService) HasDigestAuth() bool {
+	return s.authType == authTypeDigest
+}
+
+// HasCookieAuth checks if the auth type is HTTP Cookie based
+func (s *AuthenticationService) HasCookieAuth() bool {
+	return s.authType == authTypeCookie
+}
+
+// HasAuth checks if an auth type is used
+func (s *AuthenticationService) HasAuth() bool {
+	return s.authType > 0
+}
+
+// ResetAuth resets all former authentication settings
+func (s *AuthenticationService) ResetAuth() {
+	s.name = ""
+	s.secret = ""
+	s.authType = 0
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/changes.go b/vendor/github.com/andygrunwald/go-gerrit/changes.go
new file mode 100644
index 00000000000..5f6fbb27376
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/changes.go
@@ -0,0 +1,923 @@
+package gerrit
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	
"net/http" +) + +// ChangesService contains Change related REST endpoints +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html +type ChangesService struct { + client *Client +} + +// WebLinkInfo entity describes a link to an external site. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#web-link-info +type WebLinkInfo struct { + Name string `json:"name"` + URL string `json:"url"` + ImageURL string `json:"image_url"` +} + +// GitPersonInfo entity contains information about the author/committer of a commit. +type GitPersonInfo struct { + Name string `json:"name"` + Email string `json:"email"` + Date Timestamp `json:"date"` + TZ int `json:"tz"` +} + +// NotifyInfo entity contains detailed information about who should be +// notified about an update +type NotifyInfo struct { + Accounts []AccountInfo `json:"accounts"` +} + +// AbandonInput entity contains information for abandoning a change. +type AbandonInput struct { + Message string `json:"message,omitempty"` + Notify string `json:"notify,omitempty"` + NotifyDetails []NotifyInfo `json:"notify_details,omitempty"` +} + +// ApprovalInfo entity contains information about an approval from a user for a label on a change. +type ApprovalInfo struct { + AccountInfo + Value int `json:"value,omitempty"` + Date string `json:"date,omitempty"` +} + +// CommitMessageInput entity contains information for changing the commit message of a change. +type CommitMessageInput struct { + Message string `json:"message,omitempty"` + Notify string `json:"notify,omitempty"` + NotifyDetails []NotifyInfo `json:"notify_details"` +} + +// ChangeEditInput entity contains information for restoring a path within change edit. +type ChangeEditInput struct { + RestorePath string `json:"restore_path,omitempty"` + OldPath string `json:"old_path,omitempty"` + NewPath string `json:"new_path,omitempty"` +} + +// ChangeEditMessageInput entity contains information for changing the commit message within a change edit. +type ChangeEditMessageInput struct { + Message string `json:"message"` +} + +// ChangeMessageInfo entity contains information about a message attached to a change. +type ChangeMessageInfo struct { + ID string `json:"id"` + Author AccountInfo `json:"author,omitempty"` + Date Timestamp `json:"date"` + Message string `json:"message"` + Tag string `json:"tag,omitempty"` + RevisionNumber int `json:"_revision_number,omitempty"` +} + +// CherryPickInput entity contains information for cherry-picking a change to a new branch. +type CherryPickInput struct { + Message string `json:"message"` + Destination string `json:"destination"` +} + +// CommentRange entity describes the range of an inline comment. +type CommentRange struct { + StartLine int `json:"start_line"` + StartCharacter int `json:"start_character"` + EndLine int `json:"end_line"` + EndCharacter int `json:"end_character"` +} + +// DiffFileMetaInfo entity contains meta information about a file diff +type DiffFileMetaInfo struct { + Name string `json:"name"` + ContentType string `json:"content_type"` + Lines int `json:"lines"` + WebLinks []WebLinkInfo `json:"web_links,omitempty"` +} + +// DiffWebLinkInfo entity describes a link on a diff screen to an external site. 
+type DiffWebLinkInfo struct { + Name string `json:"name"` + URL string `json:"url"` + ImageURL string `json:"image_url"` + ShowOnSideBySideDiffView bool `json:"show_on_side_by_side_diff_view"` + ShowOnUnifiedDiffView bool `json:"show_on_unified_diff_view"` +} + +// FetchInfo entity contains information about how to fetch a patch set via a certain protocol. +type FetchInfo struct { + URL string `json:"url"` + Ref string `json:"ref"` + Commands map[string]string `json:"commands,omitempty"` +} + +// FixInput entity contains options for fixing commits using the fix change endpoint. +type FixInput struct { + DeletePatchSetIfCommitMissing bool `json:"delete_patch_set_if_commit_missing"` + ExpectMergedAs string `json:"expect_merged_as"` +} + +// GroupBaseInfo entity contains base information about the group. +type GroupBaseInfo struct { + ID int `json:"id"` + Name string `json:"name"` +} + +// IncludedInInfo entity contains information about the branches a change was merged into and tags it was tagged with. +type IncludedInInfo struct { + Branches []string `json:"branches"` + Tags []string `json:"tags"` + External map[string]string `json:"external,omitempty"` +} + +// ProblemInfo entity contains a description of a potential consistency problem with a change. +// These are not related to the code review process, but rather indicate some inconsistency in Gerrit’s database or repository metadata related to the enclosing change. +type ProblemInfo struct { + Message string `json:"message"` + Status string `json:"status,omitempty"` + Outcome string `json:"outcome,omitempty"` +} + +// RebaseInput entity contains information for changing parent when rebasing. +type RebaseInput struct { + Base string `json:"base,omitempty"` +} + +// RestoreInput entity contains information for restoring a change. +type RestoreInput struct { + Message string `json:"message,omitempty"` +} + +// RevertInput entity contains information for reverting a change. +type RevertInput struct { + Message string `json:"message,omitempty"` +} + +// ReviewInfo entity contains information about a review. +type ReviewInfo struct { + Labels map[string]int `json:"labels"` +} + +// ReviewerUpdateInfo entity contains information about updates +// to change's reviewers set. +type ReviewerUpdateInfo struct { + Updated Timestamp `json:"updated"` // Timestamp of the update. + UpdatedBy AccountInfo `json:"updated_by"` // The account which modified state of the reviewer in question. + Reviewer AccountInfo `json:"reviewer"` // The reviewer account added or removed from the change. + State string `json:"state"` // The reviewer state, one of "REVIEWER", "CC" or "REMOVED". +} + +// ReviewResult entity contains information regarding the updates that were +// made to a review. +type ReviewResult struct { + ReviewInfo + Reviewers map[string]AddReviewerResult `json:"reviewers,omitempty"` + Ready bool `json:"ready,omitempty"` +} + +// TopicInput entity contains information for setting a topic. +type TopicInput struct { + Topic string `json:"topic,omitempty"` +} + +// SubmitRecord entity describes results from a submit_rule. 
+type SubmitRecord struct { + Status string `json:"status"` + Ok map[string]map[string]AccountInfo `json:"ok,omitempty"` + Reject map[string]map[string]AccountInfo `json:"reject,omitempty"` + Need map[string]interface{} `json:"need,omitempty"` + May map[string]map[string]AccountInfo `json:"may,omitempty"` + Impossible map[string]interface{} `json:"impossible,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` +} + +// SubmitInput entity contains information for submitting a change. +type SubmitInput struct { + WaitForMerge bool `json:"wait_for_merge"` +} + +// SubmitInfo entity contains information about the change status after submitting. +type SubmitInfo struct { + Status string `json:"status"` + OnBehalfOf string `json:"on_behalf_of,omitempty"` +} + +// RuleInput entity contains information to test a Prolog rule. +type RuleInput struct { + Rule string `json:"rule"` + Filters string `json:"filters,omitempty"` +} + +// ReviewerInput entity contains information for adding a reviewer to a change. +type ReviewerInput struct { + Reviewer string `json:"reviewer"` + Confirmed bool `json:"confirmed,omitempty"` +} + +// ReviewInput entity contains information for adding a review to a revision. +type ReviewInput struct { + Message string `json:"message,omitempty"` + Tag string `json:"tag,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Comments map[string][]CommentInput `json:"comments,omitempty"` + RobotComments map[string][]RobotCommentInput `json:"robot_comments,omitempty"` + StrictLabels bool `json:"strict_labels,omitempty"` + Drafts string `json:"drafts,omitempty"` + Notify string `json:"notify,omitempty"` + OmitDuplicateComments bool `json:"omit_duplicate_comments,omitempty"` + OnBehalfOf string `json:"on_behalf_of,omitempty"` +} + +// RelatedChangeAndCommitInfo entity contains information about a related change and commit. +type RelatedChangeAndCommitInfo struct { + ChangeID string `json:"change_id,omitempty"` + Commit CommitInfo `json:"commit"` + ChangeNumber int `json:"_change_number,omitempty"` + RevisionNumber int `json:"_revision_number,omitempty"` + CurrentRevisionNumber int `json:"_current_revision_number,omitempty"` + Status string `json:"status,omitempty"` +} + +// DiffContent entity contains information about the content differences in a file. +type DiffContent struct { + A []string `json:"a,omitempty"` + B []string `json:"b,omitempty"` + AB []string `json:"ab,omitempty"` + EditA DiffIntralineInfo `json:"edit_a,omitempty"` + EditB DiffIntralineInfo `json:"edit_b,omitempty"` + Skip int `json:"skip,omitempty"` + Common bool `json:"common,omitempty"` +} + +// CommentInput entity contains information for creating an inline comment. +type CommentInput struct { + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` + Side string `json:"side,omitempty"` + Line int `json:"line,omitempty"` + Range *CommentRange `json:"range,omitempty"` + InReplyTo string `json:"in_reply_to,omitempty"` + Updated *Timestamp `json:"updated,omitempty"` + Message string `json:"message,omitempty"` +} + +// MoveInput entity contains information for moving a change. +type MoveInput struct { + DestinationBranch string `json:"destination_branch"` + Message string `json:"message,omitempty"` + KeepAllLabels bool `json:"keep_all_labels"` +} + +// RobotCommentInput entity contains information for creating an inline robot comment. 
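+//
+// Example (illustrative sketch, not upstream documentation; assumes c is a
+// *Client whose c.Changes field is wired to this ChangesService): attach one
+// robot comment to a file while setting a review on the current revision:
+//
+//	in := &ReviewInput{
+//		RobotComments: map[string][]RobotCommentInput{
+//			"main.go": {{
+//				CommentInput: CommentInput{Line: 1, Message: "nit: gofmt"},
+//				RobotID:      "lint-bot",
+//				RobotRunID:   "run-1",
+//			}},
+//		},
+//	}
+//	_, _, err := c.Changes.SetReview(changeID, "current", in)
+//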
+// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#robot-comment-input
+type RobotCommentInput struct {
+	CommentInput
+
+	// The ID of the robot that generated this comment.
+	RobotID string `json:"robot_id"`
+	// An ID of the run of the robot.
+	RobotRunID string `json:"robot_run_id"`
+	// URL to more information.
+	URL string `json:"url,omitempty"`
+	// Robot specific properties as map that maps arbitrary keys to values.
+	Properties *map[string]*string `json:"properties,omitempty"`
+	// Suggested fixes for this robot comment as a list of FixSuggestionInfo
+	// entities.
+	FixSuggestions *FixSuggestionInfo `json:"fix_suggestions,omitempty"`
+}
+
+// RobotCommentInfo entity contains information about a robot inline comment.
+// RobotCommentInfo has the same fields as CommentInfo. In addition RobotCommentInfo has the following fields:
+// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#robot-comment-info
+type RobotCommentInfo struct {
+	CommentInfo
+
+	// The ID of the robot that generated this comment.
+	RobotID string `json:"robot_id"`
+	// An ID of the run of the robot.
+	RobotRunID string `json:"robot_run_id"`
+	// URL to more information.
+	URL string `json:"url,omitempty"`
+	// Robot specific properties as map that maps arbitrary keys to values.
+	Properties map[string]string `json:"properties,omitempty"`
+	// Suggested fixes for this robot comment as a list of FixSuggestionInfo
+	// entities.
+	FixSuggestions *FixSuggestionInfo `json:"fix_suggestions,omitempty"`
+}
+
+// FixSuggestionInfo entity represents a suggested fix.
+// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#fix-suggestion-info
+type FixSuggestionInfo struct {
+	// The UUID of the suggested fix. It will be generated automatically and
+	// hence will be ignored if it’s set for input objects.
+	FixID string `json:"fix_id"`
+	// A description of the suggested fix.
+	Description string `json:"description"`
+	// A list of FixReplacementInfo entities indicating how the content of one or
+	// several files should be modified. Within a file, they should refer to
+	// non-overlapping regions.
+	Replacements FixReplacementInfo `json:"replacements"`
+}
+
+// FixReplacementInfo entity describes how the content of a file should be replaced by other content.
+// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#fix-replacement-info
+type FixReplacementInfo struct {
+	// The path of the file which should be modified. Any file in the repository may be modified.
+	Path string `json:"path"`
+
+	// A CommentRange indicating which content of the file should be replaced.
+	// Lines in the file are assumed to be separated by the line feed character,
+	// the carriage return character, the carriage return followed by the line
+	// feed character, or one of the other Unicode linebreak sequences supported
+	// by Java.
+	Range CommentRange `json:"range"`
+
+	// The content which should be used instead of the current one.
+	Replacement string `json:"replacement,omitempty"`
+}
+
+// AttentionSetInfo entity contains details of users that are in the attention set.
+// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#attention-set-info
+type AttentionSetInfo struct {
+	// AccountInfo entity.
+	Account AccountInfo `json:"account"`
+	// The timestamp of the last update.
+	LastUpdate Timestamp `json:"last_update"`
+	// The reason for adding or removing the user.
+ Reason string `json:"reason"` +} + +// DiffIntralineInfo entity contains information about intraline edits in a file. +// +// The information consists of a list of pairs, +// where the skip length is the number of characters between the end of +// the previous edit and the start of this edit, and the mark length is the +// number of edited characters following the skip. The start of the edits +// is from the beginning of the related diff content lines. +// +// Note that the implied newline character at the end of each line +// is included in the length calculation, and thus it is possible for +// the edits to span newlines. +type DiffIntralineInfo [][2]int + +// ChangeInfo entity contains information about a change. +type ChangeInfo struct { + ID string `json:"id"` + URL string `json:"url,omitempty"` + Project string `json:"project"` + Branch string `json:"branch"` + Topic string `json:"topic,omitempty"` + AttentionSet map[string]AttentionSetInfo `json:"attention_set,omitempty"` + Assignee AccountInfo `json:"assignee,omitempty"` + Hashtags []string `json:"hashtags,omitempty"` + ChangeID string `json:"change_id"` + Subject string `json:"subject"` + Status string `json:"status"` + Created Timestamp `json:"created"` + Updated Timestamp `json:"updated"` + Submitted *Timestamp `json:"submitted,omitempty"` + Submitter AccountInfo `json:"submitter,omitempty"` + Starred bool `json:"starred,omitempty"` + Reviewed bool `json:"reviewed,omitempty"` + SubmitType string `json:"submit_type,omitempty"` + Mergeable bool `json:"mergeable,omitempty"` + Submittable bool `json:"submittable,omitempty"` + Insertions int `json:"insertions"` + Deletions int `json:"deletions"` + TotalCommentCount int `json:"total_comment_count,omitempty"` + UnresolvedCommentCount int `json:"unresolved_comment_count,omitempty"` + Number int `json:"_number"` + Owner AccountInfo `json:"owner"` + Actions map[string]ActionInfo `json:"actions,omitempty"` + Labels map[string]LabelInfo `json:"labels,omitempty"` + PermittedLabels map[string][]string `json:"permitted_labels,omitempty"` + RemovableReviewers []AccountInfo `json:"removable_reviewers,omitempty"` + Reviewers map[string][]AccountInfo `json:"reviewers,omitempty"` + PendingReviewers map[string][]AccountInfo `json:"pending_reviewers,omitempty"` + ReviewerUpdates []ReviewerUpdateInfo `json:"reviewer_updates,omitempty"` + Messages []ChangeMessageInfo `json:"messages,omitempty"` + CurrentRevision string `json:"current_revision,omitempty"` + Revisions map[string]RevisionInfo `json:"revisions,omitempty"` + MoreChanges bool `json:"_more_changes,omitempty"` + Problems []ProblemInfo `json:"problems,omitempty"` + IsPrivate bool `json:"is_private,omitempty"` + WorkInProgress bool `json:"work_in_progress,omitempty"` + HasReviewStarted bool `json:"has_review_started,omitempty"` + RevertOf int `json:"revert_of,omitempty"` + SubmissionID string `json:"submission_id,omitempty"` + CherryPickOfChange int `json:"cherry_pick_of_change,omitempty"` + CherryPickOfPatchSet int `json:"cherry_pick_of_patch_set,omitempty"` + ContainsGitConflicts bool `json:"contains_git_conflicts,omitempty"` + BaseChange string `json:"base_change,omitempty"` +} + +// LabelInfo entity contains information about a label on a change, always corresponding to the current patch set. 
+type LabelInfo struct { + Optional bool `json:"optional,omitempty"` + + // Fields set by LABELS + Approved AccountInfo `json:"approved,omitempty"` + Rejected AccountInfo `json:"rejected,omitempty"` + Recommended AccountInfo `json:"recommended,omitempty"` + Disliked AccountInfo `json:"disliked,omitempty"` + Blocking bool `json:"blocking,omitempty"` + Value int `json:"value,omitempty"` + DefaultValue int `json:"default_value,omitempty"` + + // Fields set by DETAILED_LABELS + All []ApprovalInfo `json:"all,omitempty"` + Values map[string]string `json:"values,omitempty"` +} + +// RevisionInfo entity contains information about a patch set. +type RevisionInfo struct { + Draft bool `json:"draft,omitempty"` + Number int `json:"_number"` + Created Timestamp `json:"created"` + Uploader AccountInfo `json:"uploader"` + Ref string `json:"ref"` + Fetch map[string]FetchInfo `json:"fetch"` + Commit CommitInfo `json:"commit,omitempty"` + Files map[string]FileInfo `json:"files,omitempty"` + Actions map[string]ActionInfo `json:"actions,omitempty"` + Reviewed bool `json:"reviewed,omitempty"` + MessageWithFooter string `json:"messageWithFooter,omitempty"` +} + +// CommentInfo entity contains information about an inline comment. +type CommentInfo struct { + PatchSet int `json:"patch_set,omitempty"` + ID string `json:"id"` + Path string `json:"path,omitempty"` + Side string `json:"side,omitempty"` + Line int `json:"line,omitempty"` + Range *CommentRange `json:"range,omitempty"` + InReplyTo string `json:"in_reply_to,omitempty"` + Message string `json:"message,omitempty"` + Updated *Timestamp `json:"updated"` + Author AccountInfo `json:"author,omitempty"` +} + +// QueryOptions specifies global parameters to query changes / reviewers. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes +type QueryOptions struct { + // Query parameter + // Clients are allowed to specify more than one query by setting the q parameter multiple times. + // In this case the result is an array of arrays, one per query in the same order the queries were given in. + // + // Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/user-search.html#_search_operators + Query []string `url:"q,omitempty"` + + // The n parameter can be used to limit the returned results. + // If the n query parameter is supplied and additional changes exist that match the query beyond the end, the last change object has a _more_changes: true JSON field set. + Limit int `url:"n,omitempty"` +} + +// QueryChangeOptions specifies the parameters to the ChangesService.QueryChanges. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes +type QueryChangeOptions struct { + QueryOptions + + // The S or start query parameter can be supplied to skip a number of changes from the list. + Skip int `url:"S,omitempty"` + Start int `url:"start,omitempty"` + + ChangeOptions +} + +// ChangeOptions specifies the parameters for Query changes. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes +type ChangeOptions struct { + // Additional fields can be obtained by adding o parameters, each option requires more database lookups and slows down the query response time to the client so they are generally disabled by default. 
+	//
+	// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
+	AdditionalFields []string `url:"o,omitempty"`
+}
+
+// QueryChanges lists changes visible to the caller.
+// The query string must be provided by the q parameter.
+// The n parameter can be used to limit the returned results.
+//
+// The change output is sorted by the last update time, most recently updated to oldest updated.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
+func (s *ChangesService) QueryChanges(opt *QueryChangeOptions) (*[]ChangeInfo, *Response, error) {
+	u := "changes/"
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]ChangeInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetChange retrieves a change.
+// Additional fields can be obtained by adding o parameters; each option requires more database lookups and slows down the query response time to the client, so they are generally disabled by default.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-change
+func (s *ChangesService) GetChange(changeID string, opt *ChangeOptions) (*ChangeInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s", changeID)
+	return s.getChangeInfoResponse(u, opt)
+}
+
+// GetChangeDetail retrieves a change with labels, detailed labels, detailed accounts, and messages.
+// Additional fields can be obtained by adding o parameters; each option requires more database lookups and slows down the query response time to the client, so they are generally disabled by default.
+//
+// This response will contain all votes for each label and include one combined vote.
+// The combined label vote is calculated in the following order (from highest to lowest): REJECTED > APPROVED > DISLIKED > RECOMMENDED.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-change-detail
+func (s *ChangesService) GetChangeDetail(changeID string, opt *ChangeOptions) (*ChangeInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/detail", changeID)
+	return s.getChangeInfoResponse(u, opt)
+}
+
+// getChangeInfoResponse retrieves a single ChangeInfo Response for a GET request
+func (s *ChangesService) getChangeInfoResponse(u string, opt *ChangeOptions) (*ChangeInfo, *Response, error) {
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ChangeInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetTopic retrieves the topic of a change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-topic
+func (s *ChangesService) GetTopic(changeID string) (string, *Response, error) {
+	u := fmt.Sprintf("changes/%s/topic", changeID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// ChangesSubmittedTogether returns a list of all changes which are submitted when Submit is called for this change, including the current change itself.
+// An empty list is returned if this change will be submitted by itself (no other changes).
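+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): list everything that would merge together
+// with a given change:
+//
+//	changes, _, err := c.Changes.ChangesSubmittedTogether(changeID)
+//	if err == nil {
+//		for _, ci := range *changes {
+//			fmt.Println(ci.ID, ci.Subject)
+//		}
+//	}
+//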
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#submitted_together
+func (s *ChangesService) ChangesSubmittedTogether(changeID string) (*[]ChangeInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/submitted_together", changeID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]ChangeInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetIncludedIn retrieves the branches and tags in which a change is included.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-included-in
+func (s *ChangesService) GetIncludedIn(changeID string) (*IncludedInInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/in", changeID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(IncludedInInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// ListChangeComments lists the published comments of all revisions of the change.
+// The entries in the map are sorted by file path, and the comments for each path are sorted by patch set number.
+// Each comment has the patch_set and author fields set.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-change-comments
+func (s *ChangesService) ListChangeComments(changeID string) (*map[string][]CommentInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/comments", changeID)
+	return s.getCommentInfoMapResponse(u)
+}
+
+// ListChangeDrafts lists the draft comments of all revisions of the change that belong to the calling user.
+// The entries in the map are sorted by file path, and the comments for each path are sorted by patch set number.
+// Each comment has the patch_set field set, and no author.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-change-drafts
+func (s *ChangesService) ListChangeDrafts(changeID string) (*map[string][]CommentInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/drafts", changeID)
+	return s.getCommentInfoMapResponse(u)
+}
+
+// getCommentInfoMapResponse retrieves a map of CommentInfo Response for a GET request
+func (s *ChangesService) getCommentInfoMapResponse(u string) (*map[string][]CommentInfo, *Response, error) {
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string][]CommentInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CheckChange performs consistency checks on the change, and returns a ChangeInfo entity with the problems field set to a list of ProblemInfo entities.
+// Depending on the type of problem, some fields not marked optional may be missing from the result.
+// At least id, project, branch, and _number will be present.
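+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): run the consistency checks and print any
+// problems found:
+//
+//	info, _, err := c.Changes.CheckChange(changeID)
+//	if err == nil {
+//		for _, p := range info.Problems {
+//			fmt.Println(p.Status, p.Message)
+//		}
+//	}
+//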
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#check-change
+func (s *ChangesService) CheckChange(changeID string) (*ChangeInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/check", changeID)
+	return s.getChangeInfoResponse(u, nil)
+}
+
+// getCommentInfoResponse retrieves a CommentInfo Response for a GET request
+func (s *ChangesService) getCommentInfoResponse(u string) (*CommentInfo, *Response, error) {
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(CommentInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// getCommentInfoMapSliceResponse retrieves a map with a slice of CommentInfo Response for a GET request
+func (s *ChangesService) getCommentInfoMapSliceResponse(u string) (*map[string][]CommentInfo, *Response, error) {
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string][]CommentInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CreateChange creates a new change.
+// The change info ChangeInfo entity must be provided in the request body.
+// Only the following attributes are honored: project, branch, subject, status and topic.
+// The first three attributes are mandatory.
+// Valid values for status are: DRAFT and NEW.
+//
+// As response a ChangeInfo entity is returned that describes the resulting change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#create-change
+func (s *ChangesService) CreateChange(input *ChangeInfo) (*ChangeInfo, *Response, error) {
+	u := "changes/"
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ChangeInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetCommitMessage creates a new patch set with a new commit message.
+// The new commit message must be provided in the request body inside a CommitMessageInput entity.
+// If a Change-Id footer is specified, it must match the current Change-Id footer.
+// If the Change-Id footer is absent, the current Change-Id is added to the message.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#set-message
+func (s *ChangesService) SetCommitMessage(changeID string, input *CommitMessageInput) (*Response, error) {
+	u := fmt.Sprintf("changes/%s/message", changeID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// SetTopic sets the topic of a change.
+// The new topic must be provided in the request body inside a TopicInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#set-topic
+func (s *ChangesService) SetTopic(changeID string, input *TopicInput) (*string, *Response, error) {
+	u := fmt.Sprintf("changes/%s/topic", changeID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteTopic deletes the topic of a change.
+// The request body does not need to include a TopicInput entity if no review comment is added.
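+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): set a topic and later remove it again:
+//
+//	if _, _, err := c.Changes.SetTopic(changeID, &TopicInput{Topic: "cleanup"}); err != nil {
+//		return err
+//	}
+//	_, err := c.Changes.DeleteTopic(changeID)
+//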
+// +// Please note that some proxies prohibit request bodies for DELETE requests. +// In this case, if you want to specify a commit message, use PUT to delete the topic. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-topic +func (s *ChangesService) DeleteTopic(changeID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/topic", changeID) + return s.client.DeleteRequest(u, nil) +} + +// DeleteDraftChange deletes a draft change. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-draft-change +func (s *ChangesService) DeleteDraftChange(changeID string) (*Response, error) { + u := fmt.Sprintf("changes/%s", changeID) + return s.client.DeleteRequest(u, nil) +} + +// PublishDraftChange publishes a draft change. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#publish-draft-change +func (s *ChangesService) PublishDraftChange(changeID, notify string) (*Response, error) { + u := fmt.Sprintf("changes/%s/publish", changeID) + + req, err := s.client.NewRequest("POST", u, map[string]string{ + "notify": notify, + }) + if err != nil { + return nil, err + } + return s.client.Do(req, nil) +} + +// IndexChange adds or updates the change in the secondary index. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#index-change +func (s *ChangesService) IndexChange(changeID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/index", changeID) + + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + return s.client.Do(req, nil) +} + +// FixChange performs consistency checks on the change as with GET /check, and additionally fixes any problems that can be fixed automatically. +// The returned field values reflect any fixes. +// +// Some fixes have options controlling their behavior, which can be set in the FixInput entity body. +// Only the change owner, a project owner, or an administrator may fix changes. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#fix-change +func (s *ChangesService) FixChange(changeID string, input *FixInput) (*ChangeInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/check", changeID) + + req, err := s.client.NewRequest("PUT", u, input) + if err != nil { + return nil, nil, err + } + + v := new(ChangeInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// change is an internal function to consolidate code used by SubmitChange, +// AbandonChange and other similar functions. +func (s *ChangesService) change(tail string, changeID string, input interface{}) (*ChangeInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/%s", changeID, tail) + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, nil, err + } + + v := new(ChangeInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + if resp.StatusCode == http.StatusConflict { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return v, resp, err + } + return v, resp, errors.New(string(body[:])) + } + return v, resp, nil +} + +// SubmitChange submits a change. +// +// The request body only needs to include a SubmitInput entity if submitting on behalf of another user. 
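+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): submit and wait for the merge to complete:
+//
+//	info, _, err := c.Changes.SubmitChange(changeID, &SubmitInput{WaitForMerge: true})
+//	if err == nil {
+//		fmt.Println(info.Status) // e.g. "MERGED"
+//	}
+//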
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#submit-change
+func (s *ChangesService) SubmitChange(changeID string, input *SubmitInput) (*ChangeInfo, *Response, error) {
+	return s.change("submit", changeID, input)
+}
+
+// AbandonChange abandons a change.
+//
+// The request body does not need to include an AbandonInput entity if no review
+// comment is added.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#abandon-change
+func (s *ChangesService) AbandonChange(changeID string, input *AbandonInput) (*ChangeInfo, *Response, error) {
+	return s.change("abandon", changeID, input)
+}
+
+// RebaseChange rebases a change.
+//
+// Optionally, the parent revision can be changed to another patch set through
+// the RebaseInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#rebase-change
+func (s *ChangesService) RebaseChange(changeID string, input *RebaseInput) (*ChangeInfo, *Response, error) {
+	return s.change("rebase", changeID, input)
+}
+
+// RestoreChange restores a change.
+//
+// The request body does not need to include a RestoreInput entity if no review
+// comment is added.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#restore-change
+func (s *ChangesService) RestoreChange(changeID string, input *RestoreInput) (*ChangeInfo, *Response, error) {
+	return s.change("restore", changeID, input)
+}
+
+// RevertChange reverts a change.
+//
+// The request body does not need to include a RevertInput entity if no
+// review comment is added.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#revert-change
+func (s *ChangesService) RevertChange(changeID string, input *RevertInput) (*ChangeInfo, *Response, error) {
+	return s.change("revert", changeID, input)
+}
+
+// MoveChange moves a change.
+//
+// The destination branch must be provided in the request body inside a MoveInput entity.
+// Only veto votes that are blocking the change from submission are moved to the destination
+// branch by default.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#move-change
+func (s *ChangesService) MoveChange(changeID string, input *MoveInput) (*ChangeInfo, *Response, error) {
+	return s.change("move", changeID, input)
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/changes_edit.go b/vendor/github.com/andygrunwald/go-gerrit/changes_edit.go
new file mode 100644
index 00000000000..58033e21b51
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/changes_edit.go
@@ -0,0 +1,231 @@
+package gerrit
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// EditInfo entity contains information about a change edit.
+type EditInfo struct {
+	Commit CommitInfo `json:"commit"`
+	BaseRevision string `json:"baseRevision"`
+	Fetch map[string]FetchInfo `json:"fetch"`
+	Files map[string]FileInfo `json:"files,omitempty"`
+}
+
+// EditFileInfo entity contains additional information of a file within a change edit.
+type EditFileInfo struct {
+	WebLinks []WebLinkInfo `json:"web_links,omitempty"`
+}
+
+// ChangeEditDetailOptions specifies the parameters to the ChangesService.GetChangeEditDetails.
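+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): fetch the edit together with its file list:
+//
+//	edit, _, err := c.Changes.GetChangeEditDetails(changeID, &ChangeEditDetailOptions{List: true})
+//	if err == nil && edit != nil {
+//		for path := range edit.Files {
+//			fmt.Println(path)
+//		}
+//	}
+//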
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-edit-detail
+type ChangeEditDetailOptions struct {
+	// When request parameter list is provided the response also includes the file list.
+	List bool `url:"list,omitempty"`
+	// When base request parameter is provided the file list is computed against this base revision.
+	Base bool `url:"base,omitempty"`
+	// When request parameter download-commands is provided fetch info map is also included.
+	DownloadCommands bool `url:"download-commands,omitempty"`
+}
+
+// GetChangeEditDetails retrieves the details of a change edit.
+// As response an EditInfo entity is returned that describes the change edit, or “204 No Content” when no change edit exists for this change.
+// Change edits are stored on special branches and there can be at most one edit per user per change.
+// Edits aren’t tracked in the database.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-edit-detail
+func (s *ChangesService) GetChangeEditDetails(changeID string, opt *ChangeEditDetailOptions) (*EditInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/edit", changeID)
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(EditInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// RetrieveMetaDataOfAFileFromChangeEdit retrieves meta data of a file from a change edit.
+// Currently only web links are returned.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-edit-meta-data
+func (s *ChangesService) RetrieveMetaDataOfAFileFromChangeEdit(changeID, filePath string) (*EditFileInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/edit/%s/meta", changeID, filePath)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(EditFileInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// RetrieveCommitMessageFromChangeEdit retrieves the commit message from a change edit.
+// The commit message is returned as a base64 encoded string.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-edit-message
+func (s *ChangesService) RetrieveCommitMessageFromChangeEdit(changeID string) (string, *Response, error) {
+	u := fmt.Sprintf("changes/%s/edit:message", changeID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// ChangeFileContentInChangeEdit puts the content of a file into a change edit.
+//
+// When a change edit doesn’t exist for this change yet, it is created.
+// When the file content isn’t provided, it is wiped out for that file.
+// As response “204 No Content” is returned.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#put-edit-file
+func (s *ChangesService) ChangeFileContentInChangeEdit(changeID, filePath, content string) (*Response, error) {
+	u := fmt.Sprintf("changes/%s/edit/%s", changeID, url.QueryEscape(filePath))
+
+	req, err := s.client.NewRawPutRequest(u, content)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ChangeCommitMessageInChangeEdit modifies the commit message of a change edit.
+// The request body needs to include a ChangeEditMessageInput entity.
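+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): stage a new commit message in the edit:
+//
+//	input := &ChangeEditMessageInput{Message: "docs: clarify the API contract"}
+//	_, err := c.Changes.ChangeCommitMessageInChangeEdit(changeID, input)
+//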
+// +// If a change edit doesn’t exist for this change yet, it is created. +// As response “204 No Content” is returned. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#put-change-edit-message +func (s *ChangesService) ChangeCommitMessageInChangeEdit(changeID string, input *ChangeEditMessageInput) (*Response, error) { + u := fmt.Sprintf("changes/%s/edit:message", changeID) + + req, err := s.client.NewRequest("PUT", u, input) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteFileInChangeEdit deletes a file from a change edit. +// This deletes the file from the repository completely. +// This is not the same as reverting or restoring a file to its previous contents. +// +// When change edit doesn’t exist for this change yet it is created. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-edit-file +func (s *ChangesService) DeleteFileInChangeEdit(changeID, filePath string) (*Response, error) { + u := fmt.Sprintf("changes/%s/edit/%s", changeID, filePath) + return s.client.DeleteRequest(u, nil) +} + +// DeleteChangeEdit deletes change edit. +// +// As response “204 No Content” is returned. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-edit +func (s *ChangesService) DeleteChangeEdit(changeID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/edit", changeID) + return s.client.DeleteRequest(u, nil) +} + +// PublishChangeEdit promotes change edit to a regular patch set. +// +// As response “204 No Content” is returned. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#publish-edit +func (s *ChangesService) PublishChangeEdit(changeID, notify string) (*Response, error) { + u := fmt.Sprintf("changes/%s/edit:publish", changeID) + + req, err := s.client.NewRequest("POST", u, map[string]string{ + "notify": notify, + }) + if err != nil { + return nil, err + } + return s.client.Do(req, nil) +} + +// RebaseChangeEdit rebases change edit on top of latest patch set. +// +// When change was rebased on top of latest patch set, response “204 No Content” is returned. +// When change edit is already based on top of the latest patch set, the response “409 Conflict” is returned. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#rebase-edit +func (s *ChangesService) RebaseChangeEdit(changeID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/edit:rebase", changeID) + + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// RetrieveFileContentFromChangeEdit retrieves content of a file from a change edit. +// +// The content of the file is returned as text encoded inside base64. +// The Content-Type header will always be text/plain reflecting the outer base64 encoding. +// A Gerrit-specific X-FYI-Content-Type header can be examined to find the server detected content type of the file. +// +// When the specified file was deleted in the change edit “204 No Content” is returned. +// If only the content type is required, callers should use HEAD to avoid downloading the encoded file contents. 
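+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): fetch and base64-decode the file content:
+//
+//	content, _, err := c.Changes.RetrieveFileContentFromChangeEdit(changeID, "README.md")
+//	if err == nil && content != nil {
+//		raw, _ := base64.StdEncoding.DecodeString(*content)
+//		fmt.Println(string(raw))
+//	}
+//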
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-edit-file
+func (s *ChangesService) RetrieveFileContentFromChangeEdit(changeID, filePath string) (*string, *Response, error) {
+	u := fmt.Sprintf("changes/%s/edit/%s", changeID, filePath)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// RetrieveFileContentTypeFromChangeEdit retrieves the content type of a file from a change edit.
+// This is nearly the same as RetrieveFileContentFromChangeEdit,
+// but if only the content type is required, callers should use HEAD to avoid downloading the encoded file contents.
+//
+// For further documentation please have a look at RetrieveFileContentFromChangeEdit.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-edit-file
+func (s *ChangesService) RetrieveFileContentTypeFromChangeEdit(changeID, filePath string) (*Response, error) {
+	u := fmt.Sprintf("changes/%s/edit/%s", changeID, filePath)
+
+	req, err := s.client.NewRequest("HEAD", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+/*
+Missing Change Edit Endpoints
+	Restore file content or rename files in Change Edit
+*/
diff --git a/vendor/github.com/andygrunwald/go-gerrit/changes_reviewer.go b/vendor/github.com/andygrunwald/go-gerrit/changes_reviewer.go
new file mode 100644
index 00000000000..31099d3b954
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/changes_reviewer.go
@@ -0,0 +1,163 @@
+package gerrit
+
+import (
+	"fmt"
+)
+
+// ReviewerInfo entity contains information about a reviewer and its votes on a change.
+type ReviewerInfo struct {
+	AccountInfo
+	Approvals map[string]string `json:"approvals"`
+}
+
+// SuggestedReviewerInfo entity contains information about a reviewer that can be added to a change (an account or a group).
+type SuggestedReviewerInfo struct {
+	Account AccountInfo `json:"account,omitempty"`
+	Group GroupBaseInfo `json:"group,omitempty"`
+}
+
+// AddReviewerResult entity describes the result of adding a reviewer to a change.
+type AddReviewerResult struct {
+	Input string `json:"input,omitempty"`
+	Reviewers []ReviewerInfo `json:"reviewers,omitempty"`
+	CCS []ReviewerInfo `json:"ccs,omitempty"`
+	Error string `json:"error,omitempty"`
+	Confirm bool `json:"confirm,omitempty"`
+}
+
+// DeleteVoteInput entity contains options for the deletion of a vote.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-vote-input
+type DeleteVoteInput struct {
+	Label string `json:"label,omitempty"`
+	Notify string `json:"notify,omitempty"`
+	NotifyDetails map[string]NotifyInfo `json:"notify_details"`
+}
+
+// ListReviewers lists the reviewers of a change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-reviewers
+func (s *ChangesService) ListReviewers(changeID string) (*[]ReviewerInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/reviewers/", changeID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]ReviewerInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SuggestReviewers suggests the reviewers for a given query q and result limit n.
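+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): look up at most five candidates for "jdoe":
+//
+//	opt := &QueryOptions{Query: []string{"jdoe"}, Limit: 5}
+//	suggestions, _, err := c.Changes.SuggestReviewers(changeID, opt)
+//	if err == nil {
+//		for _, sri := range *suggestions {
+//			fmt.Println(sri.Account.Name, sri.Group.Name)
+//		}
+//	}
+//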
+// If the result limit is not passed, then the default 10 is used.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#suggest-reviewers
+func (s *ChangesService) SuggestReviewers(changeID string, opt *QueryOptions) (*[]SuggestedReviewerInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/suggest_reviewers", changeID)
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]SuggestedReviewerInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetReviewer retrieves a reviewer of a change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-reviewer
+func (s *ChangesService) GetReviewer(changeID, accountID string) (*ReviewerInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/reviewers/%s", changeID, accountID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ReviewerInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// AddReviewer adds one user or all members of one group as reviewer to the change.
+// The reviewer to be added to the change must be provided in the request body as a ReviewerInput entity.
+//
+// As response an AddReviewerResult entity is returned that describes the newly added reviewers.
+// If a group is specified, adding the group members as reviewers is an atomic operation.
+// This means if an error is returned, none of the members are added as reviewer.
+// If a group with many members is added as reviewer, a confirmation may be required.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#add-reviewer
+func (s *ChangesService) AddReviewer(changeID string, input *ReviewerInput) (*AddReviewerResult, *Response, error) {
+	u := fmt.Sprintf("changes/%s/reviewers", changeID)
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(AddReviewerResult)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteReviewer deletes a reviewer from a change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-reviewer
+func (s *ChangesService) DeleteReviewer(changeID, accountID string) (*Response, error) {
+	u := fmt.Sprintf("changes/%s/reviewers/%s", changeID, accountID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// ListVotes lists the votes for a specific reviewer of the change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-votes
+func (s *ChangesService) ListVotes(changeID string, accountID string) (map[string]int, *Response, error) {
+	u := fmt.Sprintf("changes/%s/reviewers/%s/votes/", changeID, accountID)
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var v map[string]int
+	resp, err := s.client.Do(req, &v)
+	if err != nil {
+		return nil, resp, err
+	}
+	return v, resp, err
+}
+
+// DeleteVote deletes a single vote from a change. Note that even when the
+// last vote of a reviewer is removed, the reviewer itself is still listed on
+// the change.
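+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): remove a stale Code-Review vote without
+// notifying anyone:
+//
+//	input := &DeleteVoteInput{Notify: "NONE"}
+//	_, err := c.Changes.DeleteVote(changeID, accountID, "Code-Review", input)
+//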
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-vote +func (s *ChangesService) DeleteVote(changeID string, accountID string, label string, input *DeleteVoteInput) (*Response, error) { + u := fmt.Sprintf("changes/%s/reviewers/%s/votes/%s", changeID, accountID, label) + return s.client.DeleteRequest(u, input) +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/changes_revision.go b/vendor/github.com/andygrunwald/go-gerrit/changes_revision.go new file mode 100644 index 00000000000..c20c5ac67a3 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/changes_revision.go @@ -0,0 +1,651 @@ +package gerrit + +import ( + "fmt" + "net/url" +) + +// DiffInfo entity contains information about the diff of a file in a revision. +type DiffInfo struct { + MetaA DiffFileMetaInfo `json:"meta_a,omitempty"` + MetaB DiffFileMetaInfo `json:"meta_b,omitempty"` + ChangeType string `json:"change_type"` + IntralineStatus string `json:"intraline_status,omitempty"` + DiffHeader []string `json:"diff_header"` + Content []DiffContent `json:"content"` + WebLinks []DiffWebLinkInfo `json:"web_links,omitempty"` + Binary bool `json:"binary,omitempty"` +} + +// RelatedChangesInfo entity contains information about related changes. +type RelatedChangesInfo struct { + Changes []RelatedChangeAndCommitInfo `json:"changes"` +} + +// FileInfo entity contains information about a file in a patch set. +type FileInfo struct { + Status string `json:"status,omitempty"` + Binary bool `json:"binary,omitempty"` + OldPath string `json:"old_path,omitempty"` + LinesInserted int `json:"lines_inserted,omitempty"` + LinesDeleted int `json:"lines_deleted,omitempty"` + SizeDelta int `json:"size_delta"` + Size int `json:"size"` +} + +// ActionInfo entity describes a REST API call the client can make to manipulate a resource. +// These are frequently implemented by plugins and may be discovered at runtime. +type ActionInfo struct { + Method string `json:"method,omitempty"` + Label string `json:"label,omitempty"` + Title string `json:"title,omitempty"` + Enabled bool `json:"enabled,omitempty"` +} + +// CommitInfo entity contains information about a commit. +type CommitInfo struct { + Commit string `json:"commit,omitempty"` + Parents []CommitInfo `json:"parents"` + Author GitPersonInfo `json:"author"` + Committer GitPersonInfo `json:"committer"` + Subject string `json:"subject"` + Message string `json:"message"` + WebLinks []WebLinkInfo `json:"web_links,omitempty"` +} + +// MergeableInfo entity contains information about the mergeability of a change. +type MergeableInfo struct { + SubmitType string `json:"submit_type"` + Mergeable bool `json:"mergeable"` + MergeableInto []string `json:"mergeable_into,omitempty"` +} + +// DiffOptions specifies the parameters for GetDiff call. +type DiffOptions struct { + // If the intraline parameter is specified, intraline differences are included in the diff. + Intraline bool `url:"intraline,omitempty"` + + // The base parameter can be specified to control the base patch set from which the diff + // should be generated. + Base string `url:"base,omitempty"` + + // The integer-valued request parameter parent can be specified to control the parent commit number + // against which the diff should be generated. This is useful for supporting review of merge commits. + // The value is the 1-based index of the parent’s position in the commit object. 
+	Parent int `url:"parent,omitempty"`
+
+	// If the weblinks-only parameter is specified, only the diff web links are returned.
+	WeblinksOnly bool `url:"weblinks-only,omitempty"`
+
+	// The ignore-whitespace parameter can be specified to control how whitespace differences are reported in the result. Valid values are NONE, TRAILING, CHANGED or ALL.
+	IgnoreWhitespace string `url:"ignore-whitespace,omitempty"`
+
+	// The context parameter can be specified to control the number of lines of surrounding context in the diff.
+	// Valid values are ALL or a number of lines.
+	Context string `url:"context,omitempty"`
+}
+
+// CommitOptions specifies the parameters for the GetCommit call.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-commit
+type CommitOptions struct {
+	// Adding query parameter links (for example /changes/.../commit?links) returns a CommitInfo with the additional field web_links.
+	Weblinks bool `url:"links,omitempty"`
+}
+
+// MergableOptions specifies the parameters for the GetMergeable call.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-mergeable
+type MergableOptions struct {
+	// If the other-branches parameter is specified, the mergeability will also be checked for all other branches.
+	OtherBranches bool `url:"other-branches,omitempty"`
+}
+
+// FilesOptions specifies the parameters for ListFiles and ListFilesReviewed calls.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-files
+type FilesOptions struct {
+	// The request parameter q changes the response to return a list of all files (modified or unmodified)
+	// that contain that substring in the path name. This is useful to implement suggestion services
+	// finding a file by partial name.
+	Q string `url:"q,omitempty"`
+
+	// The base parameter can be specified to control the base patch set from which the list of files
+	// should be generated.
+	//
+	// Note: This option is undocumented.
+	Base string `url:"base,omitempty"`
+
+	// The integer-valued request parameter parent changes the response to return a list of the files
+	// which are different in this commit compared to the given parent commit. This is useful for
+	// supporting review of merge commits. The value is the 1-based index of the parent’s position
+	// in the commit object.
+	Parent int `url:"parent,omitempty"`
+}
+
+// PatchOptions specifies the parameters for the GetPatch call.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-patch
+type PatchOptions struct {
+	// Adding query parameter zip (for example /changes/.../patch?zip) returns the patch as a single file inside of a ZIP archive.
+	// Clients can expand the ZIP to obtain the plain text patch, avoiding the need for a base64 decoding step.
+	// This option implies download.
+	Zip bool `url:"zip,omitempty"`
+
+	// Query parameter download (e.g. /changes/.../patch?download) will suggest the browser save the patch as commitsha1.diff.base64, for later processing by command line tools.
+	Download bool `url:"download,omitempty"`
+
+	// If the path parameter is set, the returned content is a diff of the single file that the path refers to.
+	Path string `url:"path,omitempty"`
+}
+
+// GetDiff gets the diff of a file from a certain revision.
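+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): diff a file against the first parent,
+// including intraline differences:
+//
+//	opt := &DiffOptions{Intraline: true, Parent: 1}
+//	diff, _, err := c.Changes.GetDiff(changeID, "current", "main.go", opt)
+//	if err == nil {
+//		fmt.Println(diff.ChangeType, len(diff.Content))
+//	}
+//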
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-diff
+func (s *ChangesService) GetDiff(changeID, revisionID, fileID string, opt *DiffOptions) (*DiffInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/revisions/%s/files/%s/diff", changeID, revisionID, url.PathEscape(fileID))
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(DiffInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetRelatedChanges retrieves related changes of a revision.
+// Related changes are changes that either depend on, or are dependencies of the revision.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-related-changes
+func (s *ChangesService) GetRelatedChanges(changeID, revisionID string) (*RelatedChangesInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/revisions/%s/related", changeID, revisionID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(RelatedChangesInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetDraft retrieves a draft comment of a revision that belongs to the calling user.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-draft
+func (s *ChangesService) GetDraft(changeID, revisionID, draftID string) (*CommentInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/revisions/%s/drafts/%s", changeID, revisionID, draftID)
+	return s.getCommentInfoResponse(u)
+}
+
+// GetComment retrieves a published comment of a revision.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-comment
+func (s *ChangesService) GetComment(changeID, revisionID, commentID string) (*CommentInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/revisions/%s/comments/%s", changeID, revisionID, commentID)
+	return s.getCommentInfoResponse(u)
+}
+
+// GetSubmitType gets the method the server will use to submit (merge) the change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-submit-type
+func (s *ChangesService) GetSubmitType(changeID, revisionID string) (string, *Response, error) {
+	u := fmt.Sprintf("changes/%s/revisions/%s/submit_type", changeID, revisionID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// GetRevisionActions retrieves revision actions of the revision of a change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-revision-actions
+func (s *ChangesService) GetRevisionActions(changeID, revisionID string) (*map[string]ActionInfo, *Response, error) {
+	u := fmt.Sprintf("changes/%s/revisions/%s/actions", changeID, revisionID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string]ActionInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetCommit retrieves a parsed commit of a revision.
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-commit +func (s *ChangesService) GetCommit(changeID, revisionID string, opt *CommitOptions) (*CommitInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/commit", changeID, revisionID) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(CommitInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetReview retrieves a review of a revision. +// +// As response a ChangeInfo entity with detailed labels and detailed accounts is returned that describes the review of the revision. +// The revision for which the review is retrieved is contained in the revisions field. +// In addition the current_revision field is set if the revision for which the review is retrieved is the current revision of the change. +// Please note that the returned labels are always for the current patch set. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-review +func (s *ChangesService) GetReview(changeID, revisionID string) (*ChangeInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/review", changeID, revisionID) + return s.getChangeInfoResponse(u, nil) +} + +// GetMergeable gets the method the server will use to submit (merge) the change and an indicator if the change is currently mergeable. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-mergeable +func (s *ChangesService) GetMergeable(changeID, revisionID string, opt *MergableOptions) (*MergeableInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/mergeable", changeID, revisionID) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(MergeableInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListRevisionDrafts lists the draft comments of a revision that belong to the calling user. +// Returns a map of file paths to lists of CommentInfo entries. +// The entries in the map are sorted by file path. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-drafts +func (s *ChangesService) ListRevisionDrafts(changeID, revisionID string) (*map[string][]CommentInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/drafts/", changeID, revisionID) + return s.getCommentInfoMapSliceResponse(u) +} + +// ListRevisionComments lists the published comments of a revision. +// As result a map is returned that maps the file path to a list of CommentInfo entries. +// The entries in the map are sorted by file path and only include file (or inline) comments. +// Use the Get Change Detail endpoint to retrieve the general change message (or comment). 
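+//
+// Example (illustrative sketch; assumes c is a *Client whose c.Changes field
+// is wired to this ChangesService): print every published inline comment on
+// a revision:
+//
+//	comments, _, err := c.Changes.ListRevisionComments(changeID, "current")
+//	if err == nil {
+//		for path, cs := range *comments {
+//			for _, ci := range cs {
+//				fmt.Printf("%s:%d: %s\n", path, ci.Line, ci.Message)
+//			}
+//		}
+//	}
+//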
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-comments +func (s *ChangesService) ListRevisionComments(changeID, revisionID string) (*map[string][]CommentInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/comments/", changeID, revisionID) + return s.getCommentInfoMapSliceResponse(u) +} + +// ListFiles lists the files that were modified, added or deleted in a revision. +// As result a map is returned that maps the file path to a list of FileInfo entries. +// The entries in the map are sorted by file path. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-files +func (s *ChangesService) ListFiles(changeID, revisionID string, opt *FilesOptions) (map[string]FileInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/files/", changeID, revisionID) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var v map[string]FileInfo + resp, err := s.client.Do(req, &v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListFilesReviewed lists the files that were modified, added or deleted in a revision. +// Unlike ListFiles, the response of ListFilesReviewed is a list of the paths the caller +// has marked as reviewed. Clients that also need the FileInfo should make two requests. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-files +func (s *ChangesService) ListFilesReviewed(changeID, revisionID string, opt *FilesOptions) ([]string, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/files/", changeID, revisionID) + + o := struct { + // The request parameter reviewed changes the response to return a list of the paths the caller has marked as reviewed. + Reviewed bool `url:"reviewed,omitempty"` + + FilesOptions + }{ + Reviewed: true, + } + if opt != nil { + o.FilesOptions = *opt + } + u, err := addOptions(u, o) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var v []string + resp, err := s.client.Do(req, &v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// SetReview sets a review on a revision. +// The review must be provided in the request body as a ReviewInput entity. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#set-review +func (s *ChangesService) SetReview(changeID, revisionID string, input *ReviewInput) (*ReviewResult, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/review", changeID, revisionID) + + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, nil, err + } + + v := new(ReviewResult) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// PublishDraftRevision publishes a draft revision. 
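+// +// For example (a sketch; changeID and revisionID are placeholders): +// +//  resp, err := client.Changes.PublishDraftRevision(changeID, revisionID)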
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#publish-draft-revision +func (s *ChangesService) PublishDraftRevision(changeID, revisionID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/publish", changeID, revisionID) + + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteDraftRevision deletes a draft revision. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-draft-revision +func (s *ChangesService) DeleteDraftRevision(changeID, revisionID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s", changeID, revisionID) + return s.client.DeleteRequest(u, nil) +} + +// GetPatch gets the formatted patch for one revision. +// +// The formatted patch is returned as text encoded inside base64. +// Adding query parameter zip (for example /changes/.../patch?zip) returns the patch as a single file inside of a ZIP archive. +// Clients can expand the ZIP to obtain the plain text patch, avoiding the need for a base64 decoding step. +// This option implies download. +// +// Query parameter download (e.g. /changes/.../patch?download) will suggest the browser save the patch as commitsha1.diff.base64, for later processing by command line tools. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-patch +func (s *ChangesService) GetPatch(changeID, revisionID string, opt *PatchOptions) (*string, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/patch", changeID, revisionID) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(string) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// TestSubmitType tests the submit_type Prolog rule in the project, or the one given. +// +// Request body may be either the Prolog code as text/plain or a RuleInput object. +// The query parameter filters may be set to SKIP to bypass parent project filters while testing a project-specific rule. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#test-submit-type +func (s *ChangesService) TestSubmitType(changeID, revisionID string, input *RuleInput) (*string, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/test.submit_type", changeID, revisionID) + + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, nil, err + } + + v := new(string) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// TestSubmitRule tests the submit_rule Prolog rule in the project, or the one given. +// +// Request body may be either the Prolog code as text/plain or a RuleInput object. +// The query parameter filters may be set to SKIP to bypass parent project filters while testing a project-specific rule. +// +// The response is a list of SubmitRecord entries describing the permutations that satisfy the tested submit rule. 
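+// +// For example, a sketch that posts a rule for testing (the Prolog text is illustrative, and this assumes RuleInput's Rule field carries it): +// +//  input := &gerrit.RuleInput{Rule: "submit_rule(submit(CR)) :- CR = label('Code-Review', ok(_))."} +//  records, _, err := client.Changes.TestSubmitRule(changeID, revisionID, input)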
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#test-submit-rule +func (s *ChangesService) TestSubmitRule(changeID, revisionID string, input *RuleInput) (*[]SubmitRecord, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/test.submit_rule", changeID, revisionID) + + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, nil, err + } + + v := new([]SubmitRecord) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// CreateDraft creates a draft comment on a revision. +// The new draft comment must be provided in the request body inside a CommentInput entity. +// +// As response a CommentInfo entity is returned that describes the draft comment. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#create-draft +func (s *ChangesService) CreateDraft(changeID, revisionID string, input *CommentInput) (*CommentInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/drafts", changeID, revisionID) + + req, err := s.client.NewRequest("PUT", u, input) + if err != nil { + return nil, nil, err + } + + v := new(CommentInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// UpdateDraft updates a draft comment on a revision. +// The new draft comment must be provided in the request body inside a CommentInput entity. +// +// As response a CommentInfo entity is returned that describes the draft comment. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#update-draft +func (s *ChangesService) UpdateDraft(changeID, revisionID, draftID string, input *CommentInput) (*CommentInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/drafts/%s", changeID, revisionID, draftID) + + req, err := s.client.NewRequest("PUT", u, input) + if err != nil { + return nil, nil, err + } + + v := new(CommentInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// DeleteDraft deletes a draft comment from a revision. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-draft +func (s *ChangesService) DeleteDraft(changeID, revisionID, draftID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/drafts/%s", changeID, revisionID, draftID) + return s.client.DeleteRequest(u, nil) +} + +// DeleteReviewed deletes the reviewed flag of the calling user from a file of a revision. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-reviewed +func (s *ChangesService) DeleteReviewed(changeID, revisionID, fileID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/files/%s/reviewed", changeID, revisionID, url.PathEscape(fileID)) + return s.client.DeleteRequest(u, nil) +} + +// GetContent gets the content of a file from a certain revision. +// The content is returned as base64 encoded string. +// The HTTP response Content-Type is always text/plain, reflecting the base64 wrapping. +// A Gerrit-specific X-FYI-Content-Type header is returned describing the server detected content type of the file. 
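+// +// Since the returned content is base64 encoded, callers typically decode it, for example (a sketch using encoding/base64; error handling abbreviated): +// +//  encoded, _, err := client.Changes.GetContent(changeID, revisionID, fileID) +//  if err == nil { +//      raw, _ := base64.StdEncoding.DecodeString(*encoded) +//      fmt.Println(string(raw)) +//  }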
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-content +func (s *ChangesService) GetContent(changeID, revisionID, fileID string) (*string, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/files/%s/content", changeID, revisionID, url.PathEscape(fileID)) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(string) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetContentType gets the content type of a file from a certain revision. +// This is nearly the same as GetContent. +// But if only the content type is required, callers should use HEAD to avoid downloading the encoded file contents. +// +// For further documentation see GetContent. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-content +func (s *ChangesService) GetContentType(changeID, revisionID, fileID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/files/%s/content", changeID, revisionID, url.PathEscape(fileID)) + + req, err := s.client.NewRequest("HEAD", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// SetReviewed marks a file of a revision as reviewed by the calling user. +// +// If the file was already marked as reviewed by the calling user the response is “200 OK”. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#set-reviewed +func (s *ChangesService) SetReviewed(changeID, revisionID, fileID string) (*Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/files/%s/reviewed", changeID, revisionID, url.PathEscape(fileID)) + + req, err := s.client.NewRequest("PUT", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// CherryPickRevision cherry picks a revision to a destination branch. +// The commit message and destination branch must be provided in the request body inside a CherryPickInput entity. +// +// As response a ChangeInfo entity is returned that describes the resulting cherry picked change. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#cherry-pick +func (s *ChangesService) CherryPickRevision(changeID, revisionID string, input *CherryPickInput) (*ChangeInfo, *Response, error) { + u := fmt.Sprintf("changes/%s/revisions/%s/cherrypick", changeID, revisionID) + + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, nil, err + } + + v := new(ChangeInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +/* +TODO: Missing Revision Endpoints + Rebase Revision + Submit Revision + DownloadContent (https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-safe-content) +*/ diff --git a/vendor/github.com/andygrunwald/go-gerrit/config.go b/vendor/github.com/andygrunwald/go-gerrit/config.go new file mode 100644 index 00000000000..088b0fb429c --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/config.go @@ -0,0 +1,529 @@ +package gerrit + +import ( + "fmt" +) + +// ConfigService contains Config related REST endpoints +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html +type ConfigService struct { + client *Client +} + +// TopMenuItemInfo entity contains information about a menu item in a top menu entry. 
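+// For example, a menu item as serialized by Gerrit (illustrative values): +// +//  {"url": "https://www.example.com", "name": "Example", "target": "_blank"}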
+type TopMenuItemInfo struct { + URL string `json:"url"` + Name string `json:"name"` + Target string `json:"target"` + ID string `json:"id,omitempty"` +} + +// AuthInfo entity contains information about the authentication configuration of the Gerrit server. +type AuthInfo struct { + Type string `json:"type"` + UseContributorAgreements bool `json:"use_contributor_agreements,omitempty"` + EditableAccountFields []string `json:"editable_account_fields"` + LoginURL string `json:"login_url,omitempty"` + LoginText string `json:"login_text,omitempty"` + SwitchAccountURL string `json:"switch_account_url,omitempty"` + RegisterURL string `json:"register_url,omitempty"` + RegisterText string `json:"register_text,omitempty"` + EditFullNameURL string `json:"edit_full_name_url,omitempty"` + HTTPPasswordURL string `json:"http_password_url,omitempty"` + IsGitBasicAuth bool `json:"is_git_basic_auth,omitempty"` +} + +// CacheInfo entity contains information about a cache. +type CacheInfo struct { + Name string `json:"name,omitempty"` + Type string `json:"type"` + Entries EntriesInfo `json:"entries"` + AverageGet string `json:"average_get,omitempty"` + HitRatio HitRatioInfo `json:"hit_ratio"` +} + +// CacheOperationInput entity contains information about an operation that should be executed on caches. +type CacheOperationInput struct { + Operation string `json:"operation"` + Caches []string `json:"caches,omitempty"` +} + +// ConfigCapabilityInfo entity contains information about a capability. +type ConfigCapabilityInfo struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// HitRatioInfo entity contains information about the hit ratio of a cache. +type HitRatioInfo struct { + Mem int `json:"mem"` + Disk int `json:"disk,omitempty"` +} + +// EntriesInfo entity contains information about the entries in a cache. +type EntriesInfo struct { + Mem int `json:"mem,omitempty"` + Disk int `json:"disk,omitempty"` + Space string `json:"space,omitempty"` +} + +// UserConfigInfo entity contains information about Gerrit configuration from the user section. +type UserConfigInfo struct { + AnonymousCowardName string `json:"anonymous_coward_name"` +} + +// TopMenuEntryInfo entity contains information about a top menu entry. +type TopMenuEntryInfo struct { + Name string `json:"name"` + Items []TopMenuItemInfo `json:"items"` +} + +// ThreadSummaryInfo entity contains information about the current threads. +type ThreadSummaryInfo struct { + CPUs int `json:"cpus"` + Threads int `json:"threads"` + Counts map[string]map[string]int `json:"counts"` +} + +// TaskSummaryInfo entity contains information about the current tasks. +type TaskSummaryInfo struct { + Total int `json:"total,omitempty"` + Running int `json:"running,omitempty"` + Ready int `json:"ready,omitempty"` + Sleeping int `json:"sleeping,omitempty"` +} + +// TaskInfo entity contains information about a task in a background work queue. +type TaskInfo struct { + ID string `json:"id"` + State string `json:"state"` + StartTime string `json:"start_time"` + Delay int `json:"delay"` + Command string `json:"command"` + RemoteName string `json:"remote_name,omitempty"` + Project string `json:"project,omitempty"` +} + +// SummaryInfo entity contains information about the current state of the server.
+type SummaryInfo struct { + TaskSummary TaskSummaryInfo `json:"task_summary"` + MemSummary MemSummaryInfo `json:"mem_summary"` + ThreadSummary ThreadSummaryInfo `json:"thread_summary"` + JVMSummary JvmSummaryInfo `json:"jvm_summary,omitempty"` +} + +// SuggestInfo entity contains information about Gerrit configuration from the suggest section. +type SuggestInfo struct { + From int `json:"from"` +} + +// SSHdInfo entity contains information about Gerrit configuration from the sshd section. +type SSHdInfo struct{} + +// ServerInfo entity contains information about the configuration of the Gerrit server. +type ServerInfo struct { + Auth AuthInfo `json:"auth"` + Change ChangeConfigInfo `json:"change"` + Download DownloadInfo `json:"download"` + Gerrit Info `json:"gerrit"` + Gitweb map[string]string `json:"gitweb,omitempty"` + Plugin PluginConfigInfo `json:"plugin"` + Receive ReceiveInfo `json:"receive,omitempty"` + SSHd SSHdInfo `json:"sshd,omitempty"` + Suggest SuggestInfo `json:"suggest"` + URLAliases map[string]string `json:"url_aliases,omitempty"` + User UserConfigInfo `json:"user"` +} + +// ReceiveInfo entity contains information about the configuration of git-receive-pack behavior on the server. +type ReceiveInfo struct { + EnableSignedPush bool `json:"enableSignedPush,omitempty"` +} + +// PluginConfigInfo entity contains information about Gerrit extensions by plugins. +type PluginConfigInfo struct { + // HasAvatars reports whether an avatar provider is registered. + HasAvatars bool `json:"has_avatars,omitempty"` +} + +// MemSummaryInfo entity contains information about the current memory usage. +type MemSummaryInfo struct { + Total string `json:"total"` + Used string `json:"used"` + Free string `json:"free"` + Buffers string `json:"buffers"` + Max string `json:"max"` + OpenFiles int `json:"open_files,omitempty"` +} + +// JvmSummaryInfo entity contains information about the JVM. +type JvmSummaryInfo struct { + VMVendor string `json:"vm_vendor"` + VMName string `json:"vm_name"` + VMVersion string `json:"vm_version"` + OSName string `json:"os_name"` + OSVersion string `json:"os_version"` + OSArch string `json:"os_arch"` + User string `json:"user"` + Host string `json:"host,omitempty"` + CurrentWorkingDirectory string `json:"current_working_directory"` + Site string `json:"site"` +} + +// Info entity contains information about Gerrit configuration from the gerrit section. +type Info struct { + AllProjectsName string `json:"all_projects_name"` + AllUsersName string `json:"all_users_name"` + DocURL string `json:"doc_url,omitempty"` + ReportBugURL string `json:"report_bug_url,omitempty"` + ReportBugText string `json:"report_bug_text,omitempty"` +} + +// GitwebInfo entity contains information about the gitweb configuration. +type GitwebInfo struct { + URL string `json:"url"` + Type GitwebTypeInfo `json:"type"` +} + +// GitwebTypeInfo entity contains information about the gitweb configuration. +type GitwebTypeInfo struct { + Name string `json:"name"` + Revision string `json:"revision,omitempty"` + Project string `json:"project,omitempty"` + Branch string `json:"branch,omitempty"` + RootTree string `json:"root_tree,omitempty"` + File string `json:"file,omitempty"` + FileHistory string `json:"file_history,omitempty"` + PathSeparator string `json:"path_separator"` + LinkDrafts bool `json:"link_drafts,omitempty"` + URLEncode bool `json:"url_encode,omitempty"` +} + +// EmailConfirmationInput entity contains information for confirming an email address.
+type EmailConfirmationInput struct { + Token string `json:"token"` +} + +// DownloadSchemeInfo entity contains information about a supported download scheme and its commands. +type DownloadSchemeInfo struct { + URL string `json:"url"` + IsAuthRequired bool `json:"is_auth_required,omitempty"` + IsAuthSupported bool `json:"is_auth_supported,omitempty"` + Commands map[string]string `json:"commands"` + CloneCommands map[string]string `json:"clone_commands"` +} + +// DownloadInfo entity contains information about supported download options. +type DownloadInfo struct { + Schemes map[string]DownloadSchemeInfo `json:"schemes"` + Archives []string `json:"archives"` +} + +// ChangeConfigInfo entity contains information about Gerrit configuration from the change section. +type ChangeConfigInfo struct { + AllowDrafts bool `json:"allow_drafts,omitempty"` + LargeChange int `json:"large_change"` + ReplyLabel string `json:"reply_label"` + ReplyTooltip string `json:"reply_tooltip"` + UpdateDelay int `json:"update_delay"` + SubmitWholeTopic bool `json:"submit_whole_topic"` +} + +// ListCachesOptions specifies the different output formats. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#list-caches +type ListCachesOptions struct { + // Format specifies the different output formats. + Format string `url:"format,omitempty"` +} + +// SummaryOptions specifies the different options for the GetSummary call. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-summary +type SummaryOptions struct { + // JVM includes a JVM summary. + JVM bool `url:"jvm,omitempty"` + // GC requests a Java garbage collection before computing the information about the Java memory heap. + GC bool `url:"gc,omitempty"` +} + +// GetVersion returns the version of the Gerrit server. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-version +func (s *ConfigService) GetVersion() (string, *Response, error) { + u := "config/server/version" + return getStringResponseWithoutOptions(s.client, u) +} + +// GetServerInfo returns the information about the Gerrit server configuration. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-info +func (s *ConfigService) GetServerInfo() (*ServerInfo, *Response, error) { + u := "config/server/info" + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(ServerInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListCaches lists the caches of the server. Caches defined by plugins are included. +// The caller must be a member of a group that is granted one of the following capabilities: +// * View Caches +// * Maintain Server +// * Administrate Server +// The entries in the map are sorted by cache name. 
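+// +// For example, a sketch that lists all caches (the caller needs one of the capabilities above; passing nil uses the default output format): +// +//  caches, _, err := client.Config.ListCaches(nil) +//  for name, info := range *caches { +//      fmt.Println(name, info.Type) +//  }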
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#list-caches +func (s *ConfigService) ListCaches(opt *ListCachesOptions) (*map[string]CacheInfo, *Response, error) { + u := "config/server/caches/" + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(map[string]CacheInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetCache retrieves information about a cache. +// The caller must be a member of a group that is granted one of the following capabilities: +// * View Caches +// * Maintain Server +// * Administrate Server +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-cache +func (s *ConfigService) GetCache(cacheName string) (*CacheInfo, *Response, error) { + u := fmt.Sprintf("config/server/caches/%s", cacheName) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(CacheInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetSummary retrieves a summary of the current server state. +// The caller must be a member of a group that is granted the Administrate Server capability. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-summary +func (s *ConfigService) GetSummary(opt *SummaryOptions) (*SummaryInfo, *Response, error) { + u := "config/server/summary" + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(SummaryInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListCapabilities lists the capabilities that are available in the system. +// There are two kinds of capabilities: core and plugin-owned capabilities. +// The entries in the map are sorted by capability ID. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#list-capabilities +func (s *ConfigService) ListCapabilities() (*map[string]ConfigCapabilityInfo, *Response, error) { + u := "config/server/capabilities" + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(map[string]ConfigCapabilityInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ListTasks lists the tasks from the background work queues that the Gerrit daemon is currently performing, or will perform in the near future. +// Gerrit contains an internal scheduler, similar to cron, that it uses to queue and dispatch both short and long term tasks. +// Tasks that are completed or canceled exit the queue very quickly once they enter this state, but it can be possible to observe tasks in these states. +// End-users may see a task only if they can also see the project the task is associated with. +// Tasks operating on other projects, or that do not have a specific project, are hidden. +// +// The caller must be a member of a group that is granted one of the following capabilities: +// * View Queue +// * Maintain Server +// * Administrate Server +// +// The entries in the list are sorted by task state, remaining delay and command. 
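+// +// For example (a sketch; the caller needs one of the capabilities above): +// +//  tasks, _, err := client.Config.ListTasks() +//  for _, t := range *tasks { +//      fmt.Println(t.ID, t.State, t.Command) +//  }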
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#list-tasks +func (s *ConfigService) ListTasks() (*[]TaskInfo, *Response, error) { + u := "config/server/tasks" + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]TaskInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetTask retrieves a task from the background work queue that the Gerrit daemon is currently performing, or will perform in the near future. +// End-users may see a task only if they can also see the project the task is associated with. +// Tasks operating on other projects, or that do not have a specific project, are hidden. +// +// The caller must be a member of a group that is granted one of the following capabilities: +// * View Queue +// * Maintain Server +// * Administrate Server +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-task +func (s *ConfigService) GetTask(taskID string) (*TaskInfo, *Response, error) { + u := fmt.Sprintf("config/server/tasks/%s", taskID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(TaskInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetTopMenus returns the list of additional top menu entries. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#get-top-menus +func (s *ConfigService) GetTopMenus() (*[]TopMenuEntryInfo, *Response, error) { + u := "config/server/top-menus" + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]TopMenuEntryInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// ConfirmEmail confirms that the user owns an email address. +// The email token must be provided in the request body inside an EmailConfirmationInput entity. +// +// The response is “204 No Content”. +// If the token is invalid or if it’s the token of another user the request fails and the response is “422 Unprocessable Entity”. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#confirm-email +func (s *ConfigService) ConfirmEmail(input *EmailConfirmationInput) (*Response, error) { + u := "config/server/email.confirm" + + req, err := s.client.NewRequest("PUT", u, input) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// CacheOperations executes a cache operation that is specified in the request body in a CacheOperationInput entity. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#cache-operations +func (s *ConfigService) CacheOperations(input *CacheOperationInput) (*Response, error) { + u := "config/server/caches/" + + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// FlushCache flushes a cache. 
+// The caller must be a member of a group that is granted one of the following capabilities: +// +// * Flush Caches (any cache except "web_sessions") +// * Maintain Server (any cache including "web_sessions") +// * Administrate Server (any cache including "web_sessions") +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#flush-cache +func (s *ConfigService) FlushCache(cacheName string, input *CacheOperationInput) (*Response, error) { + u := fmt.Sprintf("config/server/caches/%s/flush", cacheName) + + req, err := s.client.NewRequest("POST", u, input) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteTask kills a task from the background work queue that the Gerrit daemon is currently performing, or will perform in the near future. +// The caller must be a member of a group that is granted one of the following capabilities: +// +// * Kill Task +// * Maintain Server +// * Administrate Server +// +// End-users may see a task only if they can also see the project the task is associated with. +// Tasks operating on other projects, or that do not have a specific project, are hidden. +// Members of a group granted one of the following capabilities may view all tasks: +// +// * View Queue +// * Maintain Server +// * Administrate Server +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-config.html#delete-task +func (s *ConfigService) DeleteTask(taskID string) (*Response, error) { + u := fmt.Sprintf("config/server/tasks/%s", taskID) + return s.client.DeleteRequest(u, nil) +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/doc.go b/vendor/github.com/andygrunwald/go-gerrit/doc.go new file mode 100644 index 00000000000..e3336ef3d73 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/doc.go @@ -0,0 +1,68 @@ +/* +Package gerrit provides a client for using the Gerrit API. + +Construct a new Gerrit client, then use the various services on the client to +access different parts of the Gerrit API. For example: + + instance := "https://go-review.googlesource.com/" + client, _ := gerrit.NewClient(instance, nil) + + // Get all public projects + projects, _, err := client.Projects.ListProjects(nil) + +Set optional parameters for an API method by passing an Options object. + + // Get all projects with descriptions + opt := &gerrit.ProjectOptions{ + Description: true, + } + projects, _, err := client.Projects.ListProjects(opt) + +The services of a client divide the API into logical chunks and correspond to +the structure of the Gerrit API documentation at +https://gerrit-review.googlesource.com/Documentation/rest-api.html#_endpoints. + +Authentication + +The go-gerrit library supports several authentication methods. +These methods are combined in the AuthenticationService, which is available at client.Authentication. + +One way is authentication via HTTP cookie. +Some hosted Gerrit instances, like the ones on googlesource.com (e.g. https://go-review.googlesource.com/, +https://android-review.googlesource.com/ or https://gerrit-review.googlesource.com/), support HTTP cookie authentication. + +You need the cookie name and the cookie value. +You can get them by clicking on "Settings > HTTP Password > Obtain Password" in your Gerrit instance. +The cookie name will usually be "o" (if hosted on googlesource.com). +Your cookie secret will be something like "git-your@email.com=SomeHash...".
+ + instance := "https://gerrit-review.googlesource.com/" + client, _ := gerrit.NewClient(instance, nil) + client.Authentication.SetCookieAuth("o", "my-cookie-secret") + + self, _, _ := client.Accounts.GetAccount("self") + + fmt.Printf("Username: %s", self.Name) + + // Username: Andy G. + +Some other Gerrit instances (like https://review.typo3.org/) have auth.gitBasicAuth activated. +With this you can authenticate with HTTP Basic like this: + + instance := "https://review.typo3.org/" + client, _ := gerrit.NewClient(instance, nil) + client.Authentication.SetBasicAuth("andy.grunwald", "my secret password") + + self, _, _ := client.Accounts.GetAccount("self") + + fmt.Printf("Username: %s", self.Name) + + // Username: Andy Grunwald + +Additionally, when creating a new client, you can pass an http.Client that handles authentication for you. +For more information regarding authentication have a look at the Gerrit documentation: +https://gerrit-review.googlesource.com/Documentation/rest-api.html#authentication + +*/ +package gerrit diff --git a/vendor/github.com/andygrunwald/go-gerrit/events.go b/vendor/github.com/andygrunwald/go-gerrit/events.go new file mode 100644 index 00000000000..4850ba4b141 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/events.go @@ -0,0 +1,166 @@ +package gerrit + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/url" + "time" +) + +// PatchSet contains detailed information about a specific patch set. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/json.html#patchSet +type PatchSet struct { + Number Number `json:"number"` + Revision string `json:"revision"` + Parents []string `json:"parents"` + Ref string `json:"ref"` + Uploader AccountInfo `json:"uploader"` + Author AccountInfo `json:"author"` + CreatedOn int `json:"createdOn"` + IsDraft bool `json:"isDraft"` + Kind string `json:"kind"` +} + +// RefUpdate contains data about a reference update. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/json.html#refUpdate +type RefUpdate struct { + OldRev string `json:"oldRev"` + NewRev string `json:"newRev"` + RefName string `json:"refName"` + Project string `json:"project"` +} + +// EventInfo contains information about an event emitted by Gerrit. This +// structure can be used either when parsing streamed events or when reading +// the output of the events-log plugin.
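+// +// For example, one line of stream-events output can be decoded like this (a sketch; line holds a single JSON event): +// +//  var event gerrit.EventInfo +//  if err := json.Unmarshal(line, &event); err == nil { +//      fmt.Println(event.Type) +//  }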
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events +type EventInfo struct { + Type string `json:"type"` + Change ChangeInfo `json:"change,omitempty"` + ChangeKey ChangeInfo `json:"changeKey,omitempty"` + PatchSet PatchSet `json:"patchSet,omitempty"` + EventCreatedOn int `json:"eventCreatedOn,omitempty"` + Reason string `json:"reason,omitempty"` + Abandoner AccountInfo `json:"abandoner,omitempty"` + Restorer AccountInfo `json:"restorer,omitempty"` + Submitter AccountInfo `json:"submitter,omitempty"` + Author AccountInfo `json:"author,omitempty"` + Uploader AccountInfo `json:"uploader,omitempty"` + Approvals []AccountInfo `json:"approvals,omitempty"` + Comment string `json:"comment,omitempty"` + Editor AccountInfo `json:"editor,omitempty"` + Added []string `json:"added,omitempty"` + Removed []string `json:"removed,omitempty"` + Hashtags []string `json:"hashtags,omitempty"` + RefUpdate RefUpdate `json:"refUpdate,omitempty"` + Project ProjectInfo `json:"project,omitempty"` + Reviewer AccountInfo `json:"reviewer,omitempty"` + OldTopic string `json:"oldTopic,omitempty"` + Changer AccountInfo `json:"changer,omitempty"` +} + +// EventsLogService contains functions for querying the API provided +// by the optional events-log plugin. +type EventsLogService struct { + client *Client +} + +// EventsLogOptions contains options for querying events from the events-logs +// plugin. +type EventsLogOptions struct { + From time.Time + To time.Time + + // IgnoreUnmarshalErrors will cause GetEvents to ignore any errors + // that come up when calling json.Unmarshal. This can be useful in + // cases where the events-log plugin was not kept up to date with + // the Gerrit version for some reason. In these cases the events-log + // plugin will return data structs that don't match the EventInfo + // struct which in turn causes issues for json.Unmarshal. + IgnoreUnmarshalErrors bool +} + +// getURL returns the url that should be used in the request. This will vary +// depending on the options provided to GetEvents. +func (events *EventsLogService) getURL(options *EventsLogOptions) (string, error) { + parsed, err := url.Parse("/plugins/events-log/events/") + if err != nil { + return "", err + } + + query := parsed.Query() + + if !options.From.IsZero() { + query.Set("t1", options.From.Format("2006-01-02 15:04:05")) + } + + if !options.To.IsZero() { + query.Set("t2", options.To.Format("2006-01-02 15:04:05")) + } + + encoded := query.Encode() + if len(encoded) > 0 { + parsed.RawQuery = encoded + } + + return parsed.String(), nil +} + +// GetEvents returns a list of events for the given input options. Use of this +// function requires an authenticated user and for the events-log plugin to be +// installed. This function returns the unmarshalled EventInfo structs, response, +// failed lines and errors. Unmarshalling errors will cause this function to return +// before processing is complete unless you set EventsLogOptions.IgnoreUnmarshalErrors +// to true. This can be useful in cases where the events-log plugin got out of sync +// with the Gerrit version which in turn produced events which can't be +// unmarshalled into EventInfo.
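+// +// A minimal usage sketch (assumes an authenticated client and an installed events-log plugin; the one-hour window is arbitrary): +// +//  opts := &gerrit.EventsLogOptions{From: time.Now().Add(-time.Hour), IgnoreUnmarshalErrors: true} +//  events, _, failedLines, err := client.EventsLog.GetEvents(opts)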
+// +// Gerrit API docs: https://<gerrit-instance>/plugins/events-log/Documentation/rest-api-events.html +func (events *EventsLogService) GetEvents(options *EventsLogOptions) ([]EventInfo, *Response, [][]byte, error) { + info := []EventInfo{} + failures := [][]byte{} + requestURL, err := events.getURL(options) + + if err != nil { + return info, nil, failures, err + } + + request, err := events.client.NewRequest("GET", requestURL, nil) + if err != nil { + return info, nil, failures, err + } + + // Perform the request but do not pass in a structure to unpack + // the response into. The format of the response is one EventInfo + // object per line so we need to manually handle the response here. + response, err := events.client.Do(request, nil) + if err != nil { + return info, response, failures, err + } + + body, err := ioutil.ReadAll(response.Body) + defer response.Body.Close() // nolint: errcheck + if err != nil { + return info, response, failures, err + } + + for _, line := range bytes.Split(body, []byte("\n")) { + if len(line) > 0 { + event := EventInfo{} + if err := json.Unmarshal(line, &event); err != nil { // nolint: vetshadow + failures = append(failures, line) + + if !options.IgnoreUnmarshalErrors { + return info, response, failures, err + } + continue + } + info = append(info, event) + } + } + return info, response, failures, err +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/gerrit.go b/vendor/github.com/andygrunwald/go-gerrit/gerrit.go new file mode 100644 index 00000000000..ebcbda0035a --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/gerrit.go @@ -0,0 +1,564 @@ +package gerrit + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "regexp" + "strings" + + "github.com/google/go-querystring/query" +) + +// TODO Try to reduce the code duplications of a std API req +// Maybe with http://play.golang.org/p/j-667shCCB +// and https://groups.google.com/forum/#!topic/golang-nuts/D-gIr24k5uY + +// A Client manages communication with the Gerrit API. +type Client struct { + // client is the HTTP client used to communicate with the API. + client *http.Client + + // baseURL is the base URL of the Gerrit instance for API requests. + // It must have a trailing slash. + baseURL *url.URL + + // Gerrit service for authentication. + Authentication *AuthenticationService + + // Services used for talking to different parts of the standard Gerrit API. + Access *AccessService + Accounts *AccountsService + Changes *ChangesService + Config *ConfigService + Groups *GroupsService + Plugins *PluginsService + Projects *ProjectsService + + // Additional services used for talking to non-standard Gerrit APIs. + EventsLog *EventsLogService +} + +// Response is a Gerrit API response. +// This wraps the standard http.Response returned from Gerrit. +type Response struct { + *http.Response +} + +var ( + // ErrNoInstanceGiven is returned by NewClient in the event the + // gerritURL argument was blank. + ErrNoInstanceGiven = errors.New("no Gerrit instance given") + + // ErrUserProvidedWithoutPassword is returned by NewClient + // if a user name is provided without a password. + ErrUserProvidedWithoutPassword = errors.New("a username was provided without a password") + + // ErrAuthenticationFailed is returned by NewClient in the event the provided + // credentials didn't allow us to query account information using digest, basic or cookie + // auth.
+ ErrAuthenticationFailed = errors.New("failed to authenticate using the provided credentials") + + // ReParseURL is used to parse the url provided to NewClient(). This + // regular expression contains six groups which capture the scheme, + // username, password, hostname, port and path. If the url matches this + // regular expression, the credentials are extracted before the rest of + // the url is parsed. + ReParseURL = regexp.MustCompile(`^(http|https)://(.+):(.+)@(.+):(\d+)(.*)$`) +) + +// NewClient returns a new Gerrit API client. gerritURL specifies the +// HTTP endpoint of the Gerrit instance. For example, "http://localhost:8080/". +// If gerritURL does not have a trailing slash, one is added automatically. +// If a nil httpClient is provided, http.DefaultClient will be used. +// +// The url may contain credentials, http://admin:secret@localhost:8081/ for +// example. These credentials may either be a user name and password or +// name and value as in the case of cookie based authentication. If the url contains +// credentials then this function will attempt to validate the credentials before +// returning the client. ErrAuthenticationFailed will be returned if the credentials +// cannot be validated. The process of validating the credentials is relatively simple and +// only requires that the provided user have permission to GET /a/accounts/self. +func NewClient(gerritURL string, httpClient *http.Client) (*Client, error) { + if httpClient == nil { + httpClient = http.DefaultClient + } + + endpoint := gerritURL + if endpoint == "" { + return nil, ErrNoInstanceGiven + } + + hasAuth := false + username := "" + password := "" + + // Depending on the contents of the username and password the default + // url.Parse may not work. The below is an example URL that + // would end up being parsed incorrectly with url.Parse: + // http://admin:ZOSOKjgV/kgEkN0bzPJp+oGeJLqpXykqWFJpon/Ckg@localhost:38607 + // So instead of depending on url.Parse we'll try using a regular expression + // first to match a specific pattern. If that ends up working we modify + // the incoming endpoint to remove the username and password so the rest + // of this function will run as expected. + submatches := ReParseURL.FindAllStringSubmatch(endpoint, -1) + if len(submatches) > 0 && len(submatches[0]) > 5 { + submatch := submatches[0] + username = submatch[2] + password = submatch[3] + endpoint = fmt.Sprintf( + "%s://%s:%s%s", submatch[1], submatch[4], submatch[5], submatch[6]) + hasAuth = true + } + + baseURL, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + if !strings.HasSuffix(baseURL.Path, "/") { + baseURL.Path += "/" + } + + // Note, if we retrieved the URL and password using the regular + // expression above then the below code will do nothing. + if baseURL.User != nil { + username = baseURL.User.Username() + parsedPassword, haspassword := baseURL.User.Password() + + // Catches cases like http://user@localhost:8081/ where no password + // was provided at all. If a blank password is intended, it must be + // given explicitly, e.g. http://user:@localhost:8081/. + if !haspassword { + return nil, ErrUserProvidedWithoutPassword + } + + password = parsedPassword + + // Reconstruct the url but without the username and password.
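+ // For example, "http://admin:secret@localhost:8081/r/" becomes + // "http://localhost:8081/r/"; RequestURI() preserves the path and any + // query string.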
+ baseURL, err = url.Parse( + fmt.Sprintf("%s://%s%s", baseURL.Scheme, baseURL.Host, baseURL.RequestURI())) + if err != nil { + return nil, err + } + hasAuth = true + } + + c := &Client{ + client: httpClient, + baseURL: baseURL, + } + c.Authentication = &AuthenticationService{client: c} + c.Access = &AccessService{client: c} + c.Accounts = &AccountsService{client: c} + c.Changes = &ChangesService{client: c} + c.Config = &ConfigService{client: c} + c.Groups = &GroupsService{client: c} + c.Plugins = &PluginsService{client: c} + c.Projects = &ProjectsService{client: c} + c.EventsLog = &EventsLogService{client: c} + + if hasAuth { + // Digest auth (first since that's the default auth type) + c.Authentication.SetDigestAuth(username, password) + if success, err := checkAuth(c); success || err != nil { + return c, err + } + + // Basic auth + c.Authentication.SetBasicAuth(username, password) + if success, err := checkAuth(c); success || err != nil { + return c, err + } + + // Cookie auth + c.Authentication.SetCookieAuth(username, password) + if success, err := checkAuth(c); success || err != nil { + return c, err + } + + // Reset auth in case the consumer needs to do something special. + c.Authentication.ResetAuth() + return c, ErrAuthenticationFailed + } + + return c, nil +} + +// checkAuth is used by NewClient to check if the current credentials are +// valid. If the response is 401 Unauthorized then the error will be discarded. +func checkAuth(client *Client) (bool, error) { + _, response, err := client.Accounts.GetAccount("self") + switch err { + case ErrWWWAuthenticateHeaderMissing: + return false, nil + case ErrWWWAuthenticateHeaderNotDigest: + return false, nil + default: + // Response could be nil if the connection outright failed + // or some other error occurred before we got a response. + if response == nil && err != nil { + return false, err + } + + if err != nil && response.StatusCode == http.StatusUnauthorized { + err = nil + } + return response.StatusCode == http.StatusOK, err + } +} + +// NewRequest creates an API request. +// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client. +// Relative URLs should always be specified without a preceding slash. +// If specified, the value pointed to by body is JSON encoded and included as the request body. +func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { + // Build URL for request + u, err := c.buildURLForRequest(urlStr) + if err != nil { + return nil, err + } + + var buf io.ReadWriter + if body != nil { + buf = new(bytes.Buffer) + err = json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u, buf) + if err != nil { + return nil, err + } + + // Apply Authentication + if err := c.addAuthentication(req); err != nil { + return nil, err + } + + // Request compact JSON + // See https://gerrit-review.googlesource.com/Documentation/rest-api.html#output + req.Header.Add("Accept", "application/json") + req.Header.Add("Content-Type", "application/json") + + // TODO: Add gzip encoding + // Accept-Encoding request header is set to gzip + // See https://gerrit-review.googlesource.com/Documentation/rest-api.html#output + + return req, nil +} + +// NewRawPutRequest creates a raw PUT request and makes no attempt to encode +// or marshal the body. Just passes it straight through. 
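+// +// For example (a sketch; the change-edit path and body are illustrative): +// +//  req, err := client.NewRawPutRequest("changes/myProject~123/edit/README.md", "new contents")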
+func (c *Client) NewRawPutRequest(urlStr string, body string) (*http.Request, error) { + // Build URL for request + u, err := c.buildURLForRequest(urlStr) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer([]byte(body)) + req, err := http.NewRequest("PUT", u, buf) + if err != nil { + return nil, err + } + + // Apply Authentication + if err := c.addAuthentication(req); err != nil { + return nil, err + } + + // Request compact JSON + // See https://gerrit-review.googlesource.com/Documentation/rest-api.html#output + req.Header.Add("Accept", "application/json") + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + // TODO: Add gzip encoding + // Accept-Encoding request header is set to gzip + // See https://gerrit-review.googlesource.com/Documentation/rest-api.html#output + + return req, nil +} + +// Call is a combined function for Client.NewRequest and Client.Do. +// +// Most API methods follow the same pattern: +// get the URL, apply options, make a request, and get the response, +// without adding special headers. +// To avoid a large amount of code duplication you can use Client.Call. +// +// method is the HTTP method you want to call. +// u is the URL you want to call. +// body is the HTTP body. +// v is the HTTP response. +// +// For more information read https://github.com/google/go-github/issues/234 +func (c *Client) Call(method, u string, body interface{}, v interface{}) (*Response, error) { + req, err := c.NewRequest(method, u, body) + if err != nil { + return nil, err + } + + resp, err := c.Do(req, v) + if err != nil { + return resp, err + } + + return resp, err +} + +// buildURLForRequest will build the URL (as string) that will be called. +// We need such a utility method, because the URL.Path needs to be escaped (partly). +// +// E.g. if a project is called via "projects/%s" and the project is named "plugin/delete-project" +// it has to be "projects/plugin%2Fdelete-project" instead of "projects/plugin/delete-project". +// The second url would return nothing. +func (c *Client) buildURLForRequest(urlStr string) (string, error) { + // If there is a "/" at the start, remove it. + // TODO: It can be arranged for all callers of buildURLForRequest to never have a "/" prefix, + // which can be ensured via tests. This is how it's done in go-github. + // Then, this run-time check becomes unnecessary and can be removed. + urlStr = strings.TrimPrefix(urlStr, "/") + + // If we are authenticated, let's apply the "a/" prefix, + // but only if it has not already been applied. + if c.Authentication.HasAuth() && !strings.HasPrefix(urlStr, "a/") { + urlStr = "a/" + urlStr + } + + rel, err := url.Parse(urlStr) + if err != nil { + return "", err + } + + return c.baseURL.String() + rel.String(), nil +} + +// Do sends an API request and returns the API response. +// The API response is JSON decoded and stored in the value pointed to by v, +// or returned as an error if an API error has occurred. +// If v implements the io.Writer interface, the raw response body will be written to v, +// without attempting to first decode it.
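+// +// For example, to capture the raw response body (a sketch; req comes from Client.NewRequest): +// +//  var buf bytes.Buffer +//  resp, err := client.Do(req, &buf)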
+func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + // Wrap response + response := &Response{Response: resp} + + err = CheckResponse(resp) + if err != nil { + // even though there was an error, we still return the response + // in case the caller wants to inspect it further + return response, err + } + + if v != nil { + defer resp.Body.Close() // nolint: errcheck + if w, ok := v.(io.Writer); ok { + if _, err := io.Copy(w, resp.Body); err != nil { // nolint: vetshadow + return nil, err + } + } else { + var body []byte + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + // even though there was an error, we still return the response + // in case the caller wants to inspect it further + return response, err + } + + body = RemoveMagicPrefixLine(body) + err = json.Unmarshal(body, v) + } + } + return response, err +} + +func (c *Client) addAuthentication(req *http.Request) error { + // Apply HTTP Basic Authentication + if c.Authentication.HasBasicAuth() { + req.SetBasicAuth(c.Authentication.name, c.Authentication.secret) + return nil + } + + // Apply HTTP Cookie + if c.Authentication.HasCookieAuth() { + req.AddCookie(&http.Cookie{ + Name: c.Authentication.name, + Value: c.Authentication.secret, + }) + return nil + } + + // Apply Digest Authentication. If we're using digest based + // authentication we need to make a request, process the + // WWW-Authenticate header, then set the Authorization header on the + // incoming request. We do not need to send a body along because + // the request itself should fail first. + if c.Authentication.HasDigestAuth() { + uri, err := c.buildURLForRequest(req.URL.RequestURI()) + if err != nil { + return err + } + + // WARNING: Don't use c.NewRequest here unless you like + // infinite recursion. + digestRequest, err := http.NewRequest(req.Method, uri, nil) + if err != nil { + return err + } + digestRequest.Header.Set("Accept", "*/*") + digestRequest.Header.Set("Content-Type", "application/json") + + response, err := c.client.Do(digestRequest) + if err != nil { + return err + } + + // When the function exits discard the rest of the + // body and close it. This should cause Go to + // reuse the connection. + defer io.Copy(ioutil.Discard, response.Body) // nolint: errcheck + defer response.Body.Close() // nolint: errcheck + + if response.StatusCode == http.StatusUnauthorized { + authorization, err := c.Authentication.digestAuthHeader(response) + if err != nil { + return err + } + req.Header.Set("Authorization", authorization) + } + } + + return nil +} + +// DeleteRequest sends a DELETE API request to urlStr with optional body. +// It is a shorthand combination for Client.NewRequest with Client.Do. +// +// Relative URLs should always be specified without a preceding slash. +// If specified, the value pointed to by body is JSON encoded and included as the request body. +func (c *Client) DeleteRequest(urlStr string, body interface{}) (*Response, error) { + req, err := c.NewRequest("DELETE", urlStr, body) + if err != nil { + return nil, err + } + + return c.Do(req, nil) +} + +// BaseURL returns the client's Gerrit instance HTTP endpoint. +func (c *Client) BaseURL() url.URL { + return *c.baseURL +} + +// RemoveMagicPrefixLine removes the "magic prefix line" of Gerrit's JSON +// response if present. The JSON response body starts with a magic prefix line +// that must be stripped before feeding the rest of the response body to a JSON +// parser.
The reason for this is to protect against Cross Site Script +// Inclusion (XSSI) attacks. By default all standard Gerrit APIs include this +// prefix line though some plugins may not. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api.html#output +func RemoveMagicPrefixLine(body []byte) []byte { + if bytes.HasPrefix(body, magicPrefix) { + return body[5:] + } + return body +} + +var magicPrefix = []byte(")]}'\n") + +// CheckResponse checks the API response for errors, and returns them if present. +// A response is considered an error if it has a status code outside the 200 range. +// API error responses are expected to have no response body. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api.html#response-codes +func CheckResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 { + return nil + } + + // Some calls require authentication. + // In such cases errors like: + // API call to https://review.typo3.org/accounts/self failed: 403 Forbidden + // will be thrown. + + err := fmt.Errorf("API call to %s failed: %s", r.Request.URL.String(), r.Status) + return err +} + +// queryParameterReplacements are values in a url, specifically the query +// portion of the url, which should not be escaped before being sent to +// Gerrit. Note, Gerrit itself does not escape these values when using the +// search box so we shouldn't escape them either. +var queryParameterReplacements = map[string]string{ + "+": "GOGERRIT_URL_PLACEHOLDER_PLUS", + ":": "GOGERRIT_URL_PLACEHOLDER_COLON"} + +// addOptions adds the parameters in opt as URL query parameters to s. +// opt must be a struct whose fields may contain "url" tags. +func addOptions(s string, opt interface{}) (string, error) { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Ptr && v.IsNil() { + return s, nil + } + + u, err := url.Parse(s) + if err != nil { + return s, err + } + + qs, err := query.Values(opt) + if err != nil { + return s, err + } + + // If the url contained one or more query parameters (q) then we need + // to do some escaping on these values before Encode() is called. By + // doing so we're ensuring that : and + don't get encoded which means + // they'll be passed along to Gerrit as raw ascii. Without this Gerrit + // could return 400 Bad Request depending on the query parameters.
+	// For more complete information see this issue on GitHub:
+	// https://github.com/andygrunwald/go-gerrit/issues/18
+	_, hasQuery := qs["q"]
+	if hasQuery {
+		values := []string{}
+		for _, value := range qs["q"] {
+			for key, replacement := range queryParameterReplacements {
+				value = strings.Replace(value, key, replacement, -1)
+			}
+			values = append(values, value)
+		}
+
+		qs.Del("q")
+		for _, value := range values {
+			qs.Add("q", value)
+		}
+	}
+	encoded := qs.Encode()
+
+	if hasQuery {
+		for key, replacement := range queryParameterReplacements {
+			encoded = strings.Replace(encoded, replacement, key, -1)
+		}
+	}
+
+	u.RawQuery = encoded
+	return u.String(), nil
+}
+
+// getStringResponseWithoutOptions retrieves a single string Response for a GET request
+func getStringResponseWithoutOptions(client *Client, u string) (string, *Response, error) {
+	v := new(string)
+	resp, err := client.Call("GET", u, nil, v)
+	return *v, resp, err
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/go.mod b/vendor/github.com/andygrunwald/go-gerrit/go.mod
new file mode 100644
index 00000000000..994f772f03b
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/go.mod
@@ -0,0 +1,5 @@
+module github.com/andygrunwald/go-gerrit
+
+go 1.15
+
+require github.com/google/go-querystring v1.1.0
diff --git a/vendor/github.com/andygrunwald/go-gerrit/go.sum b/vendor/github.com/andygrunwald/go-gerrit/go.sum
new file mode 100644
index 00000000000..30ee1e8cdb6
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/go.sum
@@ -0,0 +1,5 @@
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/andygrunwald/go-gerrit/groups.go b/vendor/github.com/andygrunwald/go-gerrit/groups.go
new file mode 100644
index 00000000000..531dbce350c
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/groups.go
@@ -0,0 +1,360 @@
+package gerrit
+
+import (
+	"fmt"
+)
+
+// GroupsService contains Group related REST endpoints
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html
+type GroupsService struct {
+	client *Client
+}
+
+// GroupAuditEventInfo entity contains information about an audit event of a group.
+type GroupAuditEventInfo struct {
+	// TODO Member AccountInfo OR GroupInfo `json:"member"`
+	Type string      `json:"type"`
+	User AccountInfo `json:"user"`
+	Date Timestamp   `json:"date"`
+}
+
+// GroupInfo entity contains information about a group.
+// This can be a Gerrit internal group, or an external group that is known to Gerrit.
+type GroupInfo struct {
+	ID          string           `json:"id"`
+	Name        string           `json:"name,omitempty"`
+	URL         string           `json:"url,omitempty"`
+	Options     GroupOptionsInfo `json:"options"`
+	Description string           `json:"description,omitempty"`
+	GroupID     int              `json:"group_id,omitempty"`
+	Owner       string           `json:"owner,omitempty"`
+	OwnerID     string           `json:"owner_id,omitempty"`
+	CreatedOn   *Timestamp       `json:"created_on,omitempty"`
+	Members     []AccountInfo    `json:"members,omitempty"`
+	Includes    []GroupInfo      `json:"includes,omitempty"`
+}
+
+// GroupInput entity contains information for the creation of a new internal group.
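+// If OwnerID is left unset, the new group is self-owned.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#group-input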
+type GroupInput struct {
+	Name         string `json:"name,omitempty"`
+	Description  string `json:"description,omitempty"`
+	VisibleToAll bool   `json:"visible_to_all,omitempty"`
+	OwnerID      string `json:"owner_id,omitempty"`
+}
+
+// GroupOptionsInfo entity contains options of the group.
+type GroupOptionsInfo struct {
+	VisibleToAll bool `json:"visible_to_all,omitempty"`
+}
+
+// GroupOptionsInput entity contains new options for a group.
+type GroupOptionsInput struct {
+	VisibleToAll bool `json:"visible_to_all,omitempty"`
+}
+
+// GroupsInput entity contains information about groups that should be included into a group or that should be deleted from a group.
+type GroupsInput struct {
+	OneGroup string   `json:"_one_group,omitempty"`
+	Groups   []string `json:"groups,omitempty"`
+}
+
+// ListGroupsOptions specifies the different options for the ListGroups call.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#list-groups
+type ListGroupsOptions struct {
+	// Group Options
+	// Option fields can be obtained by adding o parameters. Each option requires
+	// more lookups and slows down the query response time to the client, so they
+	// are generally disabled by default.
+	// Optional fields are:
+	//   INCLUDES: include list of directly included groups.
+	//   MEMBERS: include list of direct group members.
+	Options []string `url:"o,omitempty"`
+
+	// Check if a group is owned by the calling user.
+	// By setting the option owned and specifying a group to inspect with the option q,
+	// it is possible to find out if this group is owned by the calling user.
+	// If the group is owned by the calling user, the returned map contains this group.
+	// If the calling user doesn't own this group, an empty map is returned.
+	Owned string `url:"owned,omitempty"`
+	Group string `url:"q,omitempty"`
+
+	// Group Limit
+	// The /groups/ URL also accepts a limit integer in the n parameter. This limits the results to show n groups.
+	Limit int `url:"n,omitempty"`
+	// The /groups/ URL also accepts a start integer in the S parameter. The results will skip S groups from the group list.
+	Skip int `url:"S,omitempty"`
+}
+
+// ListGroups lists the groups accessible by the caller.
+// This is the same as using the ls-groups command over SSH, and accepts the same options as query parameters.
+// The entries in the map are sorted by group name.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#list-groups
+func (s *GroupsService) ListGroups(opt *ListGroupsOptions) (*map[string]GroupInfo, *Response, error) {
+	u := "groups/"
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string]GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetGroup retrieves a group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group
+func (s *GroupsService) GetGroup(groupID string) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s", groupID)
+	return s.getGroupInfoResponse(u)
+}
+
+// GetGroupDetail retrieves a group with the direct members and the directly included groups.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group-detail
+func (s *GroupsService) GetGroupDetail(groupID string) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/detail", groupID)
+	return s.getGroupInfoResponse(u)
+}
+
+// getGroupInfoResponse retrieves a single GroupInfo Response for a GET request
+func (s *GroupsService) getGroupInfoResponse(u string) (*GroupInfo, *Response, error) {
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetGroupName retrieves the name of a group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group-name
+func (s *GroupsService) GetGroupName(groupID string) (string, *Response, error) {
+	u := fmt.Sprintf("groups/%s/name", groupID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// GetGroupDescription retrieves the description of a group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group-description
+func (s *GroupsService) GetGroupDescription(groupID string) (string, *Response, error) {
+	u := fmt.Sprintf("groups/%s/description", groupID)
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// GetGroupOptions retrieves the options of a group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group-options
+func (s *GroupsService) GetGroupOptions(groupID string) (*GroupOptionsInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/options", groupID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupOptionsInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetGroupOwner retrieves the owner group of a Gerrit internal group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group-owner
+func (s *GroupsService) GetGroupOwner(groupID string) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/owner", groupID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetAuditLog gets the audit log of a Gerrit internal group.
+// The returned audit events are sorted by date in reverse order so that the newest audit event comes first.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-audit-log
+func (s *GroupsService) GetAuditLog(groupID string) (*[]GroupAuditEventInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/log.audit", groupID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]GroupAuditEventInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CreateGroup creates a new Gerrit internal group.
+// In the request body additional data for the group can be provided as GroupInput.
+//
+// As response the GroupInfo entity is returned that describes the created group.
+// If the group creation fails because the name is already in use, the response is “409 Conflict”.
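+//
+// A minimal usage sketch (the group name and description are illustrative,
+// and client is assumed to be a *Client built earlier):
+//
+//	group, _, err := client.Groups.CreateGroup("release-managers", &GroupInput{
+//		Description: "Accounts that may cut releases",
+//	})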
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#create-group
+func (s *GroupsService) CreateGroup(groupID string, input *GroupInput) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s", groupID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// RenameGroup renames a Gerrit internal group.
+// The new group name must be provided in the request body.
+//
+// As response the new group name is returned.
+// If renaming the group fails because the new name is already in use, the response is “409 Conflict”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#rename-group
+func (s *GroupsService) RenameGroup(groupID, name string) (*string, *Response, error) {
+	u := fmt.Sprintf("groups/%s/name", groupID)
+	input := struct {
+		Name string `json:"name"`
+	}{
+		Name: name,
+	}
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetGroupDescription sets the description of a Gerrit internal group.
+// The new group description must be provided in the request body.
+//
+// As response the new group description is returned.
+// If the description was deleted, the response is “204 No Content”.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#set-group-description
+func (s *GroupsService) SetGroupDescription(groupID, description string) (*string, *Response, error) {
+	u := fmt.Sprintf("groups/%s/description", groupID)
+	input := struct {
+		Description string `json:"description"`
+	}{
+		Description: description,
+	}
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteGroupDescription deletes the description of a Gerrit internal group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#delete-group-description
+func (s *GroupsService) DeleteGroupDescription(groupID string) (*Response, error) {
+	u := fmt.Sprintf("groups/%s/description", groupID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// SetGroupOptions sets the options of a Gerrit internal group.
+// The new group options must be provided in the request body as a GroupOptionsInput entity.
+//
+// As response the new group options are returned as a GroupOptionsInfo entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#set-group-options
+func (s *GroupsService) SetGroupOptions(groupID string, input *GroupOptionsInput) (*GroupOptionsInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/options", groupID)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupOptionsInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetGroupOwner sets the owner group of a Gerrit internal group.
+// The new owner group must be provided in the request body.
+// The new owner can be specified by name, by group UUID or by the legacy numeric group ID.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#set-group-owner
+func (s *GroupsService) SetGroupOwner(groupID, owner string) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/owner", groupID)
+	input := struct {
+		Owner string `json:"owner"`
+	}{
+		Owner: owner,
+	}
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/groups_include.go b/vendor/github.com/andygrunwald/go-gerrit/groups_include.go
new file mode 100644
index 00000000000..e8f4afc4be6
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/groups_include.go
@@ -0,0 +1,117 @@
+package gerrit
+
+import (
+	"fmt"
+)
+
+// ListIncludedGroups lists the directly included groups of a group.
+// The entries in the list are sorted by group name and UUID.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#included-groups
+func (s *GroupsService) ListIncludedGroups(groupID string) (*[]GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/groups/", groupID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetIncludedGroup retrieves an included group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-included-group
+func (s *GroupsService) GetIncludedGroup(groupID, includeGroupID string) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/groups/%s", groupID, includeGroupID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// IncludeGroup includes an internal or external group into a Gerrit internal group.
+// External groups must be specified using the UUID.
+//
+// As response a GroupInfo entity is returned that describes the included group.
+// The request also succeeds if the group is already included in this group, but then the HTTP response code is 200 OK.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#include-group
+func (s *GroupsService) IncludeGroup(groupID, includeGroupID string) (*GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/groups/%s", groupID, includeGroupID)
+
+	req, err := s.client.NewRequest("PUT", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// IncludeGroups includes one or several groups into a Gerrit internal group.
+// The groups to be included into the group must be provided in the request body as a GroupsInput entity.
+//
+// As response a list of GroupInfo entities is returned that describes the groups that were specified in the GroupsInput.
+// A GroupInfo entity is returned for each group specified in the input, independently of whether the group was newly included into the group or whether the group was already included in the group.
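+// For example, a GroupsInput{Groups: []string{"MyGroup", "MyOtherGroup"}}
+// includes both groups in a single call (the names are illustrative).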
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#include-groups
+func (s *GroupsService) IncludeGroups(groupID string, input *GroupsInput) (*[]GroupInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/groups", groupID)
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]GroupInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteIncludedGroup deletes an included group from a Gerrit internal group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#delete-included-group
+func (s *GroupsService) DeleteIncludedGroup(groupID, includeGroupID string) (*Response, error) {
+	u := fmt.Sprintf("groups/%s/groups/%s", groupID, includeGroupID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteIncludedGroups deletes one or several included groups from a Gerrit internal group.
+// The groups to be deleted from the group must be provided in the request body as a GroupsInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#delete-included-groups
+func (s *GroupsService) DeleteIncludedGroups(groupID string, input *GroupsInput) (*Response, error) {
+	u := fmt.Sprintf("groups/%s/groups.delete", groupID)
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/groups_member.go b/vendor/github.com/andygrunwald/go-gerrit/groups_member.go
new file mode 100644
index 00000000000..f5906f8b443
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/groups_member.go
@@ -0,0 +1,133 @@
+package gerrit
+
+import (
+	"fmt"
+)
+
+// ListGroupMembersOptions specifies the different options for the ListGroupMembers call.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#group-members
+type ListGroupMembersOptions struct {
+	// To resolve the included groups of a group recursively and to list all members, the parameter recursive can be set.
+	// Members from included external groups and from included groups which are not visible to the calling user are ignored.
+	Recursive bool `url:"recursive,omitempty"`
+}
+
+// MembersInput entity contains information about accounts that should be added as members to a group or that should be deleted from the group
+type MembersInput struct {
+	OneMember string   `json:"_one_member,omitempty"`
+	Members   []string `json:"members,omitempty"`
+}
+
+// ListGroupMembers lists the direct members of a Gerrit internal group.
+// The entries in the list are sorted by full name, preferred email and id.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#group-members
+func (s *GroupsService) ListGroupMembers(groupID string, opt *ListGroupMembersOptions) (*[]AccountInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/members/", groupID)
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]AccountInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetGroupMember retrieves a group member.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#get-group-member
+func (s *GroupsService) GetGroupMember(groupID, accountID string) (*AccountInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/members/%s", groupID, accountID)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(AccountInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// AddGroupMember adds a user as a member to a Gerrit internal group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#add-group-member
+func (s *GroupsService) AddGroupMember(groupID, accountID string) (*AccountInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/members/%s", groupID, accountID)
+
+	req, err := s.client.NewRequest("PUT", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(AccountInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// AddGroupMembers adds one or several users to a Gerrit internal group.
+// The users to be added to the group must be provided in the request body as a MembersInput entity.
+//
+// As response a list of detailed AccountInfo entities is returned that describes the group members that were specified in the MembersInput.
+// An AccountInfo entity is returned for each user specified in the input, independently of whether the user was newly added to the group or whether the user was already a member of the group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#_add_group_members
+func (s *GroupsService) AddGroupMembers(groupID string, input *MembersInput) (*[]AccountInfo, *Response, error) {
+	u := fmt.Sprintf("groups/%s/members", groupID)
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]AccountInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteGroupMember deletes a user from a Gerrit internal group.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#delete-group-member
+func (s *GroupsService) DeleteGroupMember(groupID, accountID string) (*Response, error) {
+	u := fmt.Sprintf("groups/%s/members/%s", groupID, accountID)
+	return s.client.DeleteRequest(u, nil)
+}
+
+// DeleteGroupMembers deletes one or several users from a Gerrit internal group.
+// The users to be deleted from the group must be provided in the request body as a MembersInput entity.
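+// For example, a MembersInput{Members: []string{"jane.roe@example.com",
+// "john.doe@example.com"}} removes both accounts in one request (the
+// addresses are illustrative).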
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-groups.html#delete-group-members
+func (s *GroupsService) DeleteGroupMembers(groupID string, input *MembersInput) (*Response, error) {
+	u := fmt.Sprintf("groups/%s/members.delete", groupID)
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/plugins.go b/vendor/github.com/andygrunwald/go-gerrit/plugins.go
new file mode 100644
index 00000000000..c4460140070
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/plugins.go
@@ -0,0 +1,131 @@
+package gerrit
+
+import (
+	"fmt"
+)
+
+// PluginsService contains Plugin related REST endpoints
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html
+type PluginsService struct {
+	client *Client
+}
+
+// PluginInfo entity describes a plugin.
+type PluginInfo struct {
+	ID       string `json:"id"`
+	Version  string `json:"version"`
+	IndexURL string `json:"index_url,omitempty"`
+	Disabled bool   `json:"disabled,omitempty"`
+}
+
+// PluginInput entity describes a plugin that should be installed.
+type PluginInput struct {
+	URL string `json:"url"`
+}
+
+// PluginOptions specifies the different options for the ListPlugins call.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#list-plugins
+type PluginOptions struct {
+	// All enables returning all plugins (enabled and disabled) instead of only the enabled ones.
+	All bool `url:"all,omitempty"`
+}
+
+// ListPlugins lists the plugins installed on the Gerrit server.
+// Only the enabled plugins are returned unless the all option is specified.
+//
+// To be allowed to see the installed plugins, a user must be a member of a group that is granted the 'View Plugins' capability or the 'Administrate Server' capability.
+// The entries in the map are sorted by plugin ID.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#list-plugins
+func (s *PluginsService) ListPlugins(opt *PluginOptions) (*map[string]PluginInfo, *Response, error) {
+	u := "plugins/"
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string]PluginInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetPluginStatus retrieves the status of a plugin on the Gerrit server.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#get-plugin-status
+func (s *PluginsService) GetPluginStatus(pluginID string) (*PluginInfo, *Response, error) {
+	u := fmt.Sprintf("plugins/%s/gerrit~status", pluginID)
+	return s.requestWithPluginInfoResponse("GET", u, nil)
+}
+
+// InstallPlugin installs a new plugin on the Gerrit server.
+// If a plugin with the specified name already exists, it is overwritten.
+//
+// Note: if the plugin provides its own name in the MANIFEST file, then the plugin name from the MANIFEST file has precedence over the {plugin-id} above.
+//
+// The plugin jar can either be sent as binary data in the request body or a URL to the plugin jar must be provided in the request body inside a PluginInput entity.
+//
+// As response a PluginInfo entity is returned that describes the plugin.
+// If an existing plugin was overwritten the response is “200 OK”.
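+//
+// A hedged sketch of installing from a URL (the plugin name and URL are
+// illustrative):
+//
+//	info, _, err := client.Plugins.InstallPlugin("delete-project", &PluginInput{
+//		URL: "https://example.com/plugins/delete-project.jar",
+//	})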
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#install-plugin
+func (s *PluginsService) InstallPlugin(pluginID string, input *PluginInput) (*PluginInfo, *Response, error) {
+	u := fmt.Sprintf("plugins/%s", pluginID)
+	return s.requestWithPluginInfoResponse("PUT", u, input)
+}
+
+// EnablePlugin enables a plugin on the Gerrit server.
+//
+// As response a PluginInfo entity is returned that describes the plugin.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#enable-plugin
+func (s *PluginsService) EnablePlugin(pluginID string) (*PluginInfo, *Response, error) {
+	u := fmt.Sprintf("plugins/%s/gerrit~enable", pluginID)
+	return s.requestWithPluginInfoResponse("POST", u, nil)
+}
+
+// DisablePlugin disables a plugin on the Gerrit server.
+//
+// As response a PluginInfo entity is returned that describes the plugin.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#disable-plugin
+func (s *PluginsService) DisablePlugin(pluginID string) (*PluginInfo, *Response, error) {
+	u := fmt.Sprintf("plugins/%s/gerrit~disable", pluginID)
+	return s.requestWithPluginInfoResponse("POST", u, nil)
+}
+
+// ReloadPlugin reloads a plugin on the Gerrit server.
+//
+// As response a PluginInfo entity is returned that describes the plugin.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-plugins.html#reload-plugin
+func (s *PluginsService) ReloadPlugin(pluginID string) (*PluginInfo, *Response, error) {
+	u := fmt.Sprintf("plugins/%s/gerrit~reload", pluginID)
+	return s.requestWithPluginInfoResponse("POST", u, nil)
+}
+
+func (s *PluginsService) requestWithPluginInfoResponse(method, u string, input interface{}) (*PluginInfo, *Response, error) {
+	req, err := s.client.NewRequest(method, u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(PluginInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects.go b/vendor/github.com/andygrunwald/go-gerrit/projects.go
new file mode 100644
index 00000000000..52a967d93e2
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/projects.go
@@ -0,0 +1,465 @@
+package gerrit
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// ProjectsService contains Project related REST endpoints
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html
+type ProjectsService struct {
+	client *Client
+}
+
+// ProjectInfo entity contains information about a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#project-info
+type ProjectInfo struct {
+	ID          string            `json:"id"`
+	Name        string            `json:"name"`
+	Parent      string            `json:"parent,omitempty"`
+	Description string            `json:"description,omitempty"`
+	State       string            `json:"state,omitempty"`
+	Branches    map[string]string `json:"branches,omitempty"`
+	WebLinks    []WebLinkInfo     `json:"web_links,omitempty"`
+}
+
+// ProjectInput entity contains information for the creation of a new project.
+type ProjectInput struct {
+	Name                             string                       `json:"name,omitempty"`
+	Parent                           string                       `json:"parent,omitempty"`
+	Description                      string                       `json:"description,omitempty"`
+	PermissionsOnly                  bool                         `json:"permissions_only"`
+	CreateEmptyCommit                bool                         `json:"create_empty_commit"`
+	SubmitType                       string                       `json:"submit_type,omitempty"`
+	Branches                         []string                     `json:"branches,omitempty"`
+	Owners                           []string                     `json:"owners,omitempty"`
+	UseContributorAgreements         string                       `json:"use_contributor_agreements"`
+	UseSignedOffBy                   string                       `json:"use_signed_off_by"`
+	CreateNewChangeForAllNotInTarget string                       `json:"create_new_change_for_all_not_in_target"`
+	UseContentMerge                  string                       `json:"use_content_merge"`
+	RequireChangeID                  string                       `json:"require_change_id"`
+	MaxObjectSizeLimit               string                       `json:"max_object_size_limit,omitempty"`
+	PluginConfigValues               map[string]map[string]string `json:"plugin_config_values,omitempty"`
+}
+
+// GCInput entity contains information to run the Git garbage collection.
+type GCInput struct {
+	ShowProgress bool `json:"show_progress"`
+	Aggressive   bool `json:"aggressive"`
+}
+
+// HeadInput entity contains information for setting HEAD for a project.
+type HeadInput struct {
+	Ref string `json:"ref"`
+}
+
+// BanInput entity contains information for banning commits in a project.
+type BanInput struct {
+	Commits []string `json:"commits"`
+	Reason  string   `json:"reason,omitempty"`
+}
+
+// BanResultInfo entity describes the result of banning commits.
+type BanResultInfo struct {
+	NewlyBanned   []string `json:"newly_banned,omitempty"`
+	AlreadyBanned []string `json:"already_banned,omitempty"`
+	Ignored       []string `json:"ignored,omitempty"`
+}
+
+// ThemeInfo entity describes a theme.
+type ThemeInfo struct {
+	CSS    string `json:"css,omitempty"`
+	Header string `json:"header,omitempty"`
+	Footer string `json:"footer,omitempty"`
+}
+
+// ReflogEntryInfo entity describes an entry in a reflog.
+type ReflogEntryInfo struct {
+	OldID   string        `json:"old_id"`
+	NewID   string        `json:"new_id"`
+	Who     GitPersonInfo `json:"who"`
+	Comment string        `json:"comment"`
+}
+
+// ProjectParentInput entity contains information for setting a project parent.
+type ProjectParentInput struct {
+	Parent        string `json:"parent"`
+	CommitMessage string `json:"commit_message,omitempty"`
+}
+
+// RepositoryStatisticsInfo entity contains information about statistics of a Git repository.
+type RepositoryStatisticsInfo struct {
+	NumberOfLooseObjects  int `json:"number_of_loose_objects"`
+	NumberOfLooseRefs     int `json:"number_of_loose_refs"`
+	NumberOfPackFiles     int `json:"number_of_pack_files"`
+	NumberOfPackedObjects int `json:"number_of_packed_objects"`
+	NumberOfPackedRefs    int `json:"number_of_packed_refs"`
+	SizeOfLooseObjects    int `json:"size_of_loose_objects"`
+	SizeOfPackedObjects   int `json:"size_of_packed_objects"`
+}
+
+// InheritedBooleanInfo entity represents a boolean value that can also be inherited.
+type InheritedBooleanInfo struct {
+	Value           bool   `json:"value"`
+	ConfiguredValue string `json:"configured_value"`
+	InheritedValue  bool   `json:"inherited_value,omitempty"`
+}
+
+// MaxObjectSizeLimitInfo entity contains information about the max object size limit of a project.
+type MaxObjectSizeLimitInfo struct {
+	Value           string `json:"value,omitempty"`
+	ConfiguredValue string `json:"configured_value,omitempty"`
+	InheritedValue  string `json:"inherited_value,omitempty"`
+}
+
+// ConfigParameterInfo entity describes a project configuration parameter.
+type ConfigParameterInfo struct {
+	DisplayName string   `json:"display_name,omitempty"`
+	Description string   `json:"description,omitempty"`
+	Warning     string   `json:"warning,omitempty"`
+	Type        string   `json:"type"`
+	Value       string   `json:"value,omitempty"`
+	Values      []string `json:"values,omitempty"`
+	// TODO: 5 fields are missing here because the upstream documentation is inconsistent.
+	// See https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#config-parameter-info
+}
+
+// ProjectDescriptionInput entity contains information for setting a project description.
+type ProjectDescriptionInput struct {
+	Description   string `json:"description,omitempty"`
+	CommitMessage string `json:"commit_message,omitempty"`
+}
+
+// ConfigInfo entity contains information about the effective project configuration.
+type ConfigInfo struct {
+	Description                      string                         `json:"description,omitempty"`
+	UseContributorAgreements         InheritedBooleanInfo           `json:"use_contributor_agreements,omitempty"`
+	UseContentMerge                  InheritedBooleanInfo           `json:"use_content_merge,omitempty"`
+	UseSignedOffBy                   InheritedBooleanInfo           `json:"use_signed_off_by,omitempty"`
+	CreateNewChangeForAllNotInTarget InheritedBooleanInfo           `json:"create_new_change_for_all_not_in_target,omitempty"`
+	RequireChangeID                  InheritedBooleanInfo           `json:"require_change_id,omitempty"`
+	EnableSignedPush                 InheritedBooleanInfo           `json:"enable_signed_push,omitempty"`
+	MaxObjectSizeLimit               MaxObjectSizeLimitInfo         `json:"max_object_size_limit"`
+	SubmitType                       string                         `json:"submit_type"`
+	State                            string                         `json:"state,omitempty"`
+	Commentlinks                     map[string]string              `json:"commentlinks"`
+	Theme                            ThemeInfo                      `json:"theme,omitempty"`
+	PluginConfig                     map[string]ConfigParameterInfo `json:"plugin_config,omitempty"`
+	Actions                          map[string]ActionInfo          `json:"actions,omitempty"`
+}
+
+// ConfigInput entity describes a new project configuration.
+type ConfigInput struct {
+	Description                      string                       `json:"description,omitempty"`
+	UseContributorAgreements         string                       `json:"use_contributor_agreements,omitempty"`
+	UseContentMerge                  string                       `json:"use_content_merge,omitempty"`
+	UseSignedOffBy                   string                       `json:"use_signed_off_by,omitempty"`
+	CreateNewChangeForAllNotInTarget string                       `json:"create_new_change_for_all_not_in_target,omitempty"`
+	RequireChangeID                  string                       `json:"require_change_id,omitempty"`
+	MaxObjectSizeLimit               MaxObjectSizeLimitInfo       `json:"max_object_size_limit,omitempty"`
+	SubmitType                       string                       `json:"submit_type,omitempty"`
+	State                            string                       `json:"state,omitempty"`
+	PluginConfigValues               map[string]map[string]string `json:"plugin_config_values,omitempty"`
+}
+
+// ProjectBaseOptions specifies the basic options for projects
+// and sub-functionality (e.g. Tags).
type ProjectBaseOptions struct {
+	// Limit the number of projects to be included in the results.
+	Limit int `url:"n,omitempty"`
+
+	// Skip the given number of entries from the beginning of the list.
+	Skip string `url:"s,omitempty"`
+}
+
+// ProjectOptions specifies the parameters to the ProjectsService.ListProjects.
+type ProjectOptions struct {
+	ProjectBaseOptions
+
+	// Limit the results to the projects having the specified branch and include the sha1 of the branch in the results.
+	Branch string `url:"b,omitempty"`
+
+	// Include project description in the results.
+	Description bool `url:"d,omitempty"`
+
+	// Limit the results to those projects that start with the specified prefix.
+	Prefix string `url:"p,omitempty"`
+
+	// Limit the results to those projects that match the specified regex.
+	// Boundary matchers '^' and '$' are implicit.
+	// For example: the regex 'test.*' will match any project that starts with 'test' and the regex '.*test' will match any project that ends with 'test'.
+	Regex string `url:"r,omitempty"`
+
+	// Skip the given number of projects from the beginning of the list.
+	Skip string `url:"S,omitempty"`
+
+	// Limit the results to those projects that match the specified substring.
+	Substring string `url:"m,omitempty"`
+
+	// Get the project inheritance in a tree-like format.
+	// This option does not work together with the branch option.
+	Tree bool `url:"t,omitempty"`
+
+	// Get projects with the specified type: ALL, CODE, PERMISSIONS.
+	Type string `url:"type,omitempty"`
+}
+
+// ListProjects lists the projects accessible by the caller.
+// This is the same as using the ls-projects command over SSH, and accepts the same options as query parameters.
+// The entries in the map are sorted by project name.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#list-projects
+func (s *ProjectsService) ListProjects(opt *ProjectOptions) (*map[string]ProjectInfo, *Response, error) {
+	u := "projects/"
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(map[string]ProjectInfo)
+	resp, err := s.client.Call("GET", u, nil, v)
+	return v, resp, err
+}
+
+// GetProject retrieves a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-project
+func (s *ProjectsService) GetProject(projectName string) (*ProjectInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s", url.QueryEscape(projectName))
+
+	v := new(ProjectInfo)
+	resp, err := s.client.Call("GET", u, nil, v)
+	return v, resp, err
+}
+
+// CreateProject creates a new project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#create-project
+func (s *ProjectsService) CreateProject(projectName string, input *ProjectInput) (*ProjectInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/", url.QueryEscape(projectName))
+
+	v := new(ProjectInfo)
+	resp, err := s.client.Call("PUT", u, input, v)
+	return v, resp, err
+}
+
+// GetProjectDescription retrieves the description of a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-project-description
+func (s *ProjectsService) GetProjectDescription(projectName string) (string, *Response, error) {
+	u := fmt.Sprintf("projects/%s/description", url.QueryEscape(projectName))
+
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// GetProjectParent retrieves the name of a project’s parent project.
+// For the All-Projects root project an empty string is returned.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-project-parent
+func (s *ProjectsService) GetProjectParent(projectName string) (string, *Response, error) {
+	u := fmt.Sprintf("projects/%s/parent", url.QueryEscape(projectName))
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// GetHEAD retrieves the name of the branch to which a project's HEAD points.
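+// For a project whose HEAD points at master this typically yields the string
+// "refs/heads/master".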
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-head
+func (s *ProjectsService) GetHEAD(projectName string) (string, *Response, error) {
+	u := fmt.Sprintf("projects/%s/HEAD", url.QueryEscape(projectName))
+	return getStringResponseWithoutOptions(s.client, u)
+}
+
+// GetRepositoryStatistics returns statistics for the repository of a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-repository-statistics
+func (s *ProjectsService) GetRepositoryStatistics(projectName string) (*RepositoryStatisticsInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/statistics.git", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(RepositoryStatisticsInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetConfig gets some configuration information about a project.
+// Note that this config info is not simply the contents of project.config;
+// it generally contains fields that may have been inherited from parent projects.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-config
+func (s *ProjectsService) GetConfig(projectName string) (*ConfigInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/config", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ConfigInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetProjectDescription sets the description of a project.
+// The new project description must be provided in the request body inside a ProjectDescriptionInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#set-project-description
+func (s *ProjectsService) SetProjectDescription(projectName string, input *ProjectDescriptionInput) (*string, *Response, error) {
+	u := fmt.Sprintf("projects/%s/description", url.QueryEscape(projectName))
+
+	// TODO Use here the getStringResponseWithoutOptions (for PUT requests)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteProjectDescription deletes the description of a project.
+// The request body does not need to include a ProjectDescriptionInput entity if no commit message is specified.
+//
+// Please note that some proxies prohibit request bodies for DELETE requests.
+// In this case, if you want to specify a commit message, use PUT to delete the description.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#delete-project-description
+func (s *ProjectsService) DeleteProjectDescription(projectName string) (*Response, error) {
+	u := fmt.Sprintf("projects/%s/description", url.QueryEscape(projectName))
+	return s.client.DeleteRequest(u, nil)
+}
+
+// BanCommit marks commits as banned for the project.
+// If a commit is banned, Gerrit rejects every push that includes this commit
+// with the error message “contains banned commit ...”.
+//
+// Note:
+// This REST endpoint only marks the commits as banned, but it does not remove the commits from the history of any central branch.
+// This needs to be done manually.
+// The commits to be banned must be specified in the request body as a BanInput entity.
+//
+// The caller must be project owner.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#ban-commit
+func (s *ProjectsService) BanCommit(projectName string, input *BanInput) (*BanResultInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/ban", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(BanResultInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetConfig sets the configuration of a project.
+// The new configuration must be provided in the request body as a ConfigInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#set-config
+func (s *ProjectsService) SetConfig(projectName string, input *ConfigInput) (*ConfigInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/config", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ConfigInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetHEAD sets HEAD for a project.
+// The new ref to which HEAD should point must be provided in the request body inside a HeadInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#set-head
+func (s *ProjectsService) SetHEAD(projectName string, input *HeadInput) (*string, *Response, error) {
+	u := fmt.Sprintf("projects/%s/HEAD", url.QueryEscape(projectName))
+
+	// TODO Use here the getStringResponseWithoutOptions (for PUT requests)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetProjectParent sets the parent project for a project.
+// The new name of the parent project must be provided in the request body inside a ProjectParentInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#set-project-parent
+func (s *ProjectsService) SetProjectParent(projectName string, input *ProjectParentInput) (*string, *Response, error) {
+	u := fmt.Sprintf("projects/%s/parent", url.QueryEscape(projectName))
+
+	// TODO Use here the getStringResponseWithoutOptions (for PUT requests)
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(string)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// RunGC runs the Git garbage collection for the repository of a project.
+// The response is the streamed output of the garbage collection.
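+// For example, RunGC(projectName, &GCInput{ShowProgress: true}) requests
+// progress output while the collection runs.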
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#run-gc
+func (s *ProjectsService) RunGC(projectName string, input *GCInput) (*Response, error) {
+	u := fmt.Sprintf("projects/%s/gc", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects_access.go b/vendor/github.com/andygrunwald/go-gerrit/projects_access.go
new file mode 100644
index 00000000000..9be2f069484
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/projects_access.go
@@ -0,0 +1,148 @@
+package gerrit
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// ProjectAccessInput describes changes that should be applied to a project access config
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#project-access-input
+type ProjectAccessInput struct {
+	// A list of deductions to be applied to the project access as AccessSectionInfo entities.
+	Remove map[string]AccessSectionInfo `json:"remove"`
+
+	// A list of additions to be applied to the project access as AccessSectionInfo entities.
+	Add map[string]AccessSectionInfo `json:"add"`
+
+	// A commit message for this change.
+	Message string `json:"message"`
+
+	// A new parent for the project to inherit from. Changing the parent project requires administrative privileges.
+	Parent string `json:"parent"`
+}
+
+// AccessCheckInfo entity is the result of an access check.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#access-check-info
+type AccessCheckInfo struct {
+	// The HTTP status code for the access. 200 means success and 403 means denied.
+	Status int `json:"status"`
+
+	// A clarifying message if status is not 200.
+	Message string `json:"message"`
+}
+
+// CheckAccessOptions contains the options for a CheckAccess call.
+type CheckAccessOptions struct {
+	// The account for which to check access. Mandatory.
+	Account string `url:"account,omitempty"`
+
+	// The ref permission for which to check access. If not specified, read access to at least one branch is checked.
+	Permission string `url:"perm,omitempty"`
+
+	// The branch for which to check access. This must be given if perm is specified.
+	Ref string `url:"ref,omitempty"`
+}
+
+// ListAccessRights lists the access rights for a single project
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-access
+func (s *ProjectsService) ListAccessRights(projectName string) (*ProjectAccessInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/access", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ProjectAccessInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// AddUpdateDeleteAccessRights adds, updates, and deletes access rights for a project.
+//
+// Sets access rights for the project using the diff schema provided by ProjectAccessInput.
+// Deductions are used to remove access sections, permissions or permission rules.
+// The backend will remove the entity with the finest granularity in the request,
+// meaning that if an access section without permissions is posted, the access section will be removed;
+// if an access section with a permission but no permission rules is posted, the permission will be removed;
+// if an access section with a permission and a permission rule is posted, the permission rule will be removed.
+//
+// Additionally, access sections and permissions will be cleaned up after applying the deductions by
+// removing items that have no child elements.
+//
+// After removals have been applied, additions will be applied.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#set-access
+func (s *ProjectsService) AddUpdateDeleteAccessRights(projectName string, input *ProjectAccessInput) (*ProjectAccessInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/access", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("POST", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ProjectAccessInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CreateAccessRightChange sets access rights for the project using the diff schema provided by ProjectAccessInput.
+//
+// This takes the same input as Update Access Rights, but creates a pending change for review.
+// Like Create Change, it returns a ChangeInfo entity describing the resulting change.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#create-access-change
+func (s *ProjectsService) CreateAccessRightChange(projectName string, input *ProjectAccessInput) (*ChangeInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/access:review", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ChangeInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CheckAccess runs access checks for other users. This requires the View Access global capability.
+//
+// The result is an AccessCheckInfo entity detailing the access of the given user for the given project, project-ref, or project-permission-ref combination.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#check-access
+func (s *ProjectsService) CheckAccess(projectName string, opt *CheckAccessOptions) (*AccessCheckInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/check.access", url.QueryEscape(projectName))
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(AccessCheckInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects_branch.go b/vendor/github.com/andygrunwald/go-gerrit/projects_branch.go
new file mode 100644
index 00000000000..285f20fcb88
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-gerrit/projects_branch.go
@@ -0,0 +1,157 @@
+package gerrit
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// BranchInfo entity contains information about a branch.
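+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#branch-info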
+type BranchInfo struct {
+	Ref       string        `json:"ref"`
+	Revision  string        `json:"revision"`
+	CanDelete bool          `json:"can_delete"`
+	WebLinks  []WebLinkInfo `json:"web_links,omitempty"`
+}
+
+// BranchInput entity contains information for the creation of a new branch.
+type BranchInput struct {
+	Ref      string `json:"ref,omitempty"`
+	Revision string `json:"revision,omitempty"`
+}
+
+// DeleteBranchesInput entity contains information about branches that should be deleted.
+type DeleteBranchesInput struct {
+	Branches []string `json:"branches"`
+}
+
+// BranchOptions specifies the parameters to the branch API endpoints.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#branch-options
+type BranchOptions struct {
+	// Limit the number of branches to be included in the results.
+	Limit int `url:"n,omitempty"`
+
+	// Skip the given number of branches from the beginning of the list.
+	Skip string `url:"s,omitempty"`
+
+	// Substring limits the results to those branches that match the specified substring.
+	Substring string `url:"m,omitempty"`
+
+	// Limit the results to those branches that match the specified regex.
+	// Boundary matchers '^' and '$' are implicit.
+	// For example: the regex 'test.*' will match any branch that starts with 'test' and the regex '.*test' will match any branch that ends with 'test'.
+	Regex string `url:"r,omitempty"`
+}
+
+// ListBranches lists the branches of a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#list-branches
+func (s *ProjectsService) ListBranches(projectName string, opt *BranchOptions) (*[]BranchInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/branches/", url.QueryEscape(projectName))
+
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]BranchInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetBranch retrieves a branch of a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-branch
+func (s *ProjectsService) GetBranch(projectName, branchID string) (*BranchInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/branches/%s", url.QueryEscape(projectName), url.QueryEscape(branchID))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(BranchInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetReflog gets the reflog of a certain branch.
+// The caller must be project owner.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-reflog
+func (s *ProjectsService) GetReflog(projectName, branchID string) (*[]ReflogEntryInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/branches/%s/reflog", url.QueryEscape(projectName), url.QueryEscape(branchID))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]ReflogEntryInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// CreateBranch creates a new branch.
+// In the request body additional data for the branch can be provided as BranchInput.
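+// For example, a BranchInput{Revision: "67ebf73496383c6777035e374d2d664009e2aa5c"}
+// creates the branch at that commit; if Revision is left empty, HEAD is used as
+// the base revision (the SHA-1 above is illustrative).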
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#create-branch +func (s *ProjectsService) CreateBranch(projectName, branchID string, input *BranchInput) (*BranchInfo, *Response, error) { + u := fmt.Sprintf("projects/%s/branches/%s", url.QueryEscape(projectName), url.QueryEscape(branchID)) + + req, err := s.client.NewRequest("PUT", u, input) + if err != nil { + return nil, nil, err + } + + v := new(BranchInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// DeleteBranch deletes a branch. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#delete-branch +func (s *ProjectsService) DeleteBranch(projectName, branchID string) (*Response, error) { + u := fmt.Sprintf("projects/%s/branches/%s", url.QueryEscape(projectName), url.QueryEscape(branchID)) + return s.client.DeleteRequest(u, nil) +} + +// DeleteBranches delete one or more branches. +// If some branches could not be deleted, the response is “409 Conflict” and the error message is contained in the response body. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#delete-branches +func (s *ProjectsService) DeleteBranches(projectName string, input *DeleteBranchesInput) (*Response, error) { + u := fmt.Sprintf("projects/%s/branches:delete", url.QueryEscape(projectName)) + return s.client.DeleteRequest(u, input) +} + +// GetBranchContent gets the content of a file from the HEAD revision of a certain branch. +// The content is returned as base64 encoded string. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-content +func (s *ProjectsService) GetBranchContent(projectName, branchID, fileID string) (string, *Response, error) { + u := fmt.Sprintf("projects/%s/branches/%s/files/%s/content", url.QueryEscape(projectName), url.QueryEscape(branchID), fileID) + return getStringResponseWithoutOptions(s.client, u) +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects_childproject.go b/vendor/github.com/andygrunwald/go-gerrit/projects_childproject.go new file mode 100644 index 00000000000..ef0e100bd0e --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/projects_childproject.go @@ -0,0 +1,66 @@ +package gerrit + +import ( + "fmt" + "net/url" +) + +// ChildProjectOptions specifies the parameters to the Child Project API endpoints. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#list-child-projects +type ChildProjectOptions struct { + // Recursive resolve the child projects of a project recursively. + // Child projects that are not visible to the calling user are ignored and are not resolved further. + Recursive int `url:"recursive,omitempty"` +} + +// ListChildProjects lists the direct child projects of a project. 
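+//
+// A caller-side sketch (the project name is illustrative):
+//
+//	children, _, err := client.Projects.ListChildProjects("myParent", &gerrit.ChildProjectOptions{})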
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#list-child-projects +func (s *ProjectsService) ListChildProjects(projectName string, opt *ChildProjectOptions) (*[]ProjectInfo, *Response, error) { + u := fmt.Sprintf("projects/%s/children/", url.QueryEscape(projectName)) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]ProjectInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetChildProject retrieves a child project. +// If a non-direct child project should be retrieved the parameter recursive must be set. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-child-project +func (s *ProjectsService) GetChildProject(projectName, childProjectName string, opt *ChildProjectOptions) (*ProjectInfo, *Response, error) { + u := fmt.Sprintf("projects/%s/children/%s", url.QueryEscape(projectName), url.QueryEscape(childProjectName)) + + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(ProjectInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects_commit.go b/vendor/github.com/andygrunwald/go-gerrit/projects_commit.go new file mode 100644 index 00000000000..d4bb9425b3c --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/projects_commit.go @@ -0,0 +1,36 @@ +package gerrit + +import ( + "fmt" + "net/url" +) + +// GetCommit retrieves a commit of a project. +// The commit must be visible to the caller. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-commit +func (s *ProjectsService) GetCommit(projectName, commitID string) (*CommitInfo, *Response, error) { + u := fmt.Sprintf("projects/%s/commits/%s", url.QueryEscape(projectName), commitID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(CommitInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetCommitContent gets the content of a file from a certain commit. +// The content is returned as base64 encoded string. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html##get-content-from-commit +func (s *ProjectsService) GetCommitContent(projectName, commitID, fileID string) (string, *Response, error) { + u := fmt.Sprintf("projects/%s/commits/%s/files/%s/content", url.QueryEscape(projectName), commitID, fileID) + return getStringResponseWithoutOptions(s.client, u) +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects_dashboard.go b/vendor/github.com/andygrunwald/go-gerrit/projects_dashboard.go new file mode 100644 index 00000000000..07c38b58c73 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/projects_dashboard.go @@ -0,0 +1,108 @@ +package gerrit + +import ( + "fmt" + "net/url" +) + +// DashboardSectionInfo entity contains information about a section in a dashboard. 
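+// For example, a section may pair a display name such as "Open" with a change query such as "is:open owner:self".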
+type DashboardSectionInfo struct {
+	Name  string `json:"name"`
+	Query string `json:"query"`
+}
+
+// DashboardInput entity contains information to create/update a project dashboard.
+type DashboardInput struct {
+	ID            string `json:"id,omitempty"`
+	CommitMessage string `json:"commit_message,omitempty"`
+}
+
+// DashboardInfo entity contains information about a project dashboard.
+type DashboardInfo struct {
+	ID              string                 `json:"id"`
+	Project         string                 `json:"project"`
+	DefiningProject string                 `json:"defining_project"`
+	Ref             string                 `json:"ref"`
+	Path            string                 `json:"path"`
+	Description     string                 `json:"description,omitempty"`
+	Foreach         string                 `json:"foreach,omitempty"`
+	URL             string                 `json:"url"`
+	Default         bool                   `json:"default"`
+	Title           string                 `json:"title,omitempty"`
+	Sections        []DashboardSectionInfo `json:"sections"`
+}
+
+// ListDashboards lists the custom dashboards of a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#list-dashboards
+func (s *ProjectsService) ListDashboards(projectName string) (*[]DashboardInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/dashboards/", url.QueryEscape(projectName))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new([]DashboardInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// GetDashboard retrieves a single dashboard of a project.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-dashboard
+func (s *ProjectsService) GetDashboard(projectName, dashboardName string) (*DashboardInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/dashboards/%s", url.QueryEscape(projectName), url.QueryEscape(dashboardName))
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(DashboardInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// SetDashboard updates or creates a project dashboard.
+// Currently only supported for the default dashboard.
+//
+// The creation/update information for the dashboard must be provided in the request body as a DashboardInput entity.
+//
+// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#set-dashboard
+func (s *ProjectsService) SetDashboard(projectName, dashboardID string, input *DashboardInput) (*DashboardInfo, *Response, error) {
+	u := fmt.Sprintf("projects/%s/dashboards/%s", url.QueryEscape(projectName), url.QueryEscape(dashboardID))
+
+	req, err := s.client.NewRequest("PUT", u, input)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(DashboardInfo)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, err
+}
+
+// DeleteDashboard deletes a project dashboard.
+// Currently only supported for the default dashboard.
+//
+// The request body does not need to include a DashboardInput entity if no commit message is specified.
+// Please note that some proxies prohibit request bodies for DELETE requests.
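+//
+// A caller-side sketch (the dashboard ID "main:closed" is illustrative):
+//
+//	_, err := client.Projects.DeleteDashboard("myProject", "main:closed", nil)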
+// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#delete-dashboard +func (s *ProjectsService) DeleteDashboard(projectName, dashboardID string, input *DashboardInput) (*Response, error) { + u := fmt.Sprintf("projects/%s/dashboards/%s", url.QueryEscape(projectName), url.QueryEscape(dashboardID)) + return s.client.DeleteRequest(u, input) +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/projects_tag.go b/vendor/github.com/andygrunwald/go-gerrit/projects_tag.go new file mode 100644 index 00000000000..7070d3f732d --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/projects_tag.go @@ -0,0 +1,60 @@ +package gerrit + +import ( + "fmt" + "net/url" +) + +// TagInfo entity contains information about a tag. +type TagInfo struct { + Ref string `json:"ref"` + Revision string `json:"revision"` + Object string `json:"object"` + Message string `json:"message"` + Tagger GitPersonInfo `json:"tagger"` + Created *Timestamp `json:"created,omitempty"` +} + +// ListTags list the tags of a project. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#list-tags +func (s *ProjectsService) ListTags(projectName string, opt *ProjectBaseOptions) (*[]TagInfo, *Response, error) { + u := fmt.Sprintf("projects/%s/tags/", url.QueryEscape(projectName)) + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new([]TagInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} + +// GetTag retrieves a tag of a project. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-tag +func (s *ProjectsService) GetTag(projectName, tagName string) (*TagInfo, *Response, error) { + u := fmt.Sprintf("projects/%s/tags/%s", url.QueryEscape(projectName), url.QueryEscape(tagName)) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + v := new(TagInfo) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, err +} diff --git a/vendor/github.com/andygrunwald/go-gerrit/types.go b/vendor/github.com/andygrunwald/go-gerrit/types.go new file mode 100644 index 00000000000..baf8ee4aafd --- /dev/null +++ b/vendor/github.com/andygrunwald/go-gerrit/types.go @@ -0,0 +1,88 @@ +package gerrit + +import ( + "encoding/json" + "errors" + "strconv" + "time" +) + +// Timestamp represents an instant in time with nanosecond precision, in UTC time zone. +// It encodes to and from JSON in Gerrit's timestamp format. +// All exported methods of time.Time can be called on Timestamp. +// +// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api.html#timestamp +type Timestamp struct { + // Time is an instant in time. Its time zone must be UTC. + time.Time +} + +// MarshalJSON implements the json.Marshaler interface. +// The time is a quoted string in Gerrit's timestamp format. +// An error is returned if t.Time time zone is not UTC. +func (t Timestamp) MarshalJSON() ([]byte, error) { + if t.Location() != time.UTC { + return nil, errors.New("Timestamp.MarshalJSON: time zone must be UTC") + } + if y := t.Year(); y < 0 || 9999 < y { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#issuecomment-66073163 for more discussion. 
+ return nil, errors.New("Timestamp.MarshalJSON: year outside of range [0,9999]") + } + b := make([]byte, 0, len(timeLayout)+2) + b = append(b, '"') + b = t.AppendFormat(b, timeLayout) + b = append(b, '"') + return b, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// The time is expected to be a quoted string in Gerrit's timestamp format. +func (t *Timestamp) UnmarshalJSON(b []byte) error { + // Ignore null, like in the main JSON package. + if string(b) == "null" { + return nil + } + var err error + t.Time, err = time.Parse(`"`+timeLayout+`"`, string(b)) + return err +} + +// Gerrit's timestamp layout is like time.RFC3339Nano, but with a space instead +// of the "T", without a timezone (it's always in UTC), and always includes nanoseconds. +// See https://gerrit-review.googlesource.com/Documentation/rest-api.html#timestamp. +const timeLayout = "2006-01-02 15:04:05.000000000" + +// Number is a string representing a number. This type is only used in cases +// where the API being queried may return an inconsistent result. +type Number string + +// String returns the string representing the current number. +func (n *Number) String() string { + return string(*n) +} + +// Int returns the current number as an integer +func (n *Number) Int() (int, error) { + return strconv.Atoi(n.String()) +} + +// UnmarshalJSON will marshal the provided data into the current *Number struct. +func (n *Number) UnmarshalJSON(data []byte) error { + // `data` is a number represented as a string (ex. "5"). + var stringNumber string + if err := json.Unmarshal(data, &stringNumber); err == nil { + *n = Number(stringNumber) + return nil + } + + // `data` is a number represented as an integer (ex. 5). Here + // we're using json.Unmarshal to convert bytes -> number which + // we then convert to our own Number type. + var number int + if err := json.Unmarshal(data, &number); err == nil { + *n = Number(strconv.Itoa(number)) + return nil + } + return errors.New("cannot convert data to number") +} diff --git a/vendor/github.com/andygrunwald/go-jira/.gitignore b/vendor/github.com/andygrunwald/go-jira/.gitignore new file mode 100644 index 00000000000..1e57f8a7f7c --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/.gitignore @@ -0,0 +1,29 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Don't check in vendor +vendor/ + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.iml +.idea diff --git a/vendor/github.com/andygrunwald/go-jira/CHANGELOG.md b/vendor/github.com/andygrunwald/go-jira/CHANGELOG.md new file mode 100644 index 00000000000..5915ef43241 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/CHANGELOG.md @@ -0,0 +1,104 @@ +# Changelog + +All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+ +## [1.13.0](https://github.com/andygrunwald/go-jira/compare/v1.11.1...v1.13.0) (2020-10-25) + + +### Features + +* add AddRemoteLink method ([f200e15](https://github.com/andygrunwald/go-jira/commit/f200e158b997a303db081cbbc5a9d8ad5d89566d)), closes [/developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2](https://github.com/andygrunwald//developer.atlassian.com/cloud/jira/platform/rest/v2//issues/api-rest-api-2) +* Add Names support on Issue struct ([#278](https://github.com/andygrunwald/go-jira/issues/278)) ([1fc10e0](https://github.com/andygrunwald/go-jira/commit/1fc10e0606784f745673ccc4d8d706c36f385a7a)) +* Extend Makefile for more source code quality targets ([5e52236](https://github.com/andygrunwald/go-jira/commit/5e5223631a29d10a13e598318a6abe47384e2982)) +* **context:** Add support for context package ([e1f4265](https://github.com/andygrunwald/go-jira/commit/e1f4265e2b467b938fe0c095caf6d36f3136d2ff)) +* **issues:** Add GetEditMeta on issue ([a783764](https://github.com/andygrunwald/go-jira/commit/a783764b52dc890773658ddd0483a9d0393e385d)), closes [/docs.atlassian.com/DAC/rest/jira/6.1.html#d2e1364](https://github.com/andygrunwald//docs.atlassian.com/DAC/rest/jira/6.1.html/issues/d2e1364) +* **IssueService:** allow empty JQL ([#268](https://github.com/andygrunwald/go-jira/issues/268)) ([4b91cf2](https://github.com/andygrunwald/go-jira/commit/4b91cf2b135355de7ecee41727c3e65f4e7067bc)) +* **project:** Add cronjob to check for stale issues ([#287](https://github.com/andygrunwald/go-jira/issues/287)) ([2096b04](https://github.com/andygrunwald/go-jira/commit/2096b04e52b434c1fb1c841bab487a94674a271e)) +* **project:** Add GitHub Actions testing workflow ([#289](https://github.com/andygrunwald/go-jira/issues/289)) ([80c0282](https://github.com/andygrunwald/go-jira/commit/80c02828ca9e4eb0e4a1877275baae14d330a2d9)), closes [#290](https://github.com/andygrunwald/go-jira/issues/290) +* **project:** Add workflow to greet new contributors ([#288](https://github.com/andygrunwald/go-jira/issues/288)) ([c357b61](https://github.com/andygrunwald/go-jira/commit/c357b61a40f62a919ebd94a555390958f99c8db7)) + + +### Bug Fixes + +* change millisecond time format ([8c77107](https://github.com/andygrunwald/go-jira/commit/8c77107df3757c4ec5eae6e9d7c018618e708bfa)) +* paging with load balancer going to endless loop ([19d3fc0](https://github.com/andygrunwald/go-jira/commit/19d3fc0aecde547ffe1ab547c5ffb6c7972d387c)), closes [#260](https://github.com/andygrunwald/go-jira/issues/260) +* **issue:** IssueService.Search() with a not empty JQL triggers 400 bad request ([#292](https://github.com/andygrunwald/go-jira/issues/292)) ([8b64c7f](https://github.com/andygrunwald/go-jira/commit/8b64c7f005fbceb11fa43a7aff3de61eb3166fca)), closes [#291](https://github.com/andygrunwald/go-jira/issues/291) +* **IssueService.GetWatchers:** UserService.GetByAccountID support accountId params ([436469b](https://github.com/andygrunwald/go-jira/commit/436469b62d4d62037f380b38c918a13f4a5f0ab2)) +* **product:** Make product naming consistent, rename JIRA to Jira ([#286](https://github.com/andygrunwald/go-jira/issues/286)) ([146229d](https://github.com/andygrunwald/go-jira/commit/146229d2ab58a3fb128ddc8dcbe03aff72e20857)), closes [#284](https://github.com/andygrunwald/go-jira/issues/284) +* **tests:** Fix TestIssueService_PostAttachment unit test ([f6b1dca](https://github.com/andygrunwald/go-jira/commit/f6b1dcafcfdd8fe69f842b1053c4030da6c97c7f)) +* removing the use of username field in searching for users 
([#297](https://github.com/andygrunwald/go-jira/issues/297)) ([f50cb07](https://github.com/andygrunwald/go-jira/commit/f50cb07b297d79138b13e5ab49ea33965d32f5c1)) + +## [1.12.0](https://github.com/andygrunwald/go-jira/compare/v1.11.1...v1.12.0) (2019-12-14) + + +### Features + +* Add IssueLinkTypeService with GetList and test ([261889a](https://github.com/andygrunwald/go-jira/commit/261889adc63623fcea0fa8cab0d5da26eec37e68)) +* add worklog update method ([9ff562a](https://github.com/andygrunwald/go-jira/commit/9ff562ae3ea037961f277be10412ad0a42ff8a6f)) +* Implement get remote links method ([1946cac](https://github.com/andygrunwald/go-jira/commit/1946cac0fe6ee91f784e3dda3c12f3f30f7115b8)) +* Implement issue link type DELETE ([e37cc6c](https://github.com/andygrunwald/go-jira/commit/e37cc6c6897830492c070667ab8b68bd85683fc3)) +* Implement issue link type GET ([57538b9](https://github.com/andygrunwald/go-jira/commit/57538b926c558e97940760a30bdc16cdd37ef4f1)) +* Implement issue link type POST ([75b9df8](https://github.com/andygrunwald/go-jira/commit/75b9df8b01557f01dc318d33c0bc2841a9c084eb)) +* Implement issue link type PUT ([48a15c1](https://github.com/andygrunwald/go-jira/commit/48a15c10443a3cff78f0fb2c8034dd772320e238)) +* provide access to issue transitions loaded from JIRA API ([7530b7c](https://github.com/andygrunwald/go-jira/commit/7530b7cd8266d82cdb4afe831518986772e742ba)) + +### [1.11.1](https://github.com/andygrunwald/go-jira/compare/v1.11.0...v1.11.1) (2019-10-17) + +## [1.11.0](https://github.com/andygrunwald/go-jira/compare/v1.10.0...v1.11.0) (2019-10-17) + + +### Features + +* Add AccountID and AccountType to GroupMember struct ([216e005](https://github.com/andygrunwald/go-jira/commit/216e0056d6385eba9d31cb37e6ff64314860d2cc)) +* Add AccountType and Locale to User struct ([52ab347](https://github.com/andygrunwald/go-jira/commit/52ab34790307144087f0d9bf86c93a2b2209fe46)) +* Add GetAllStatuses ([afc96b1](https://github.com/andygrunwald/go-jira/commit/afc96b18d17b77e32cec9e1ac7e4f5dec7e627f5)) +* Add GetMyFilters to FilterService ([ebae19d](https://github.com/andygrunwald/go-jira/commit/ebae19dda6afd0e54578f30300bc36012381e99b)) +* Add Search to FilterService ([38a755b](https://github.com/andygrunwald/go-jira/commit/38a755b407cd70d11fe2e2897d814552ca29ab51)) +* add support for JWT auth with qsh needed by add-ons ([a8bdfed](https://github.com/andygrunwald/go-jira/commit/a8bdfed27ff42a9bb0468b8cf192871780919def)) +* AddGetBoardConfiguration ([fd698c5](https://github.com/andygrunwald/go-jira/commit/fd698c57163f248f21285d5ebc6a3bb60d46694f)) +* Replace http.Client with interface for extensibility ([b59a65c](https://github.com/andygrunwald/go-jira/commit/b59a65c365dcefd42e135579e9b7ce9c9c006489)) + + +### Bug Fixes + +* Fix fixversion description tag ([8383e2f](https://github.com/andygrunwald/go-jira/commit/8383e2f5f145d04f6bcdb47fb12a95b58bdcedfa)) +* Fix typos in filter_test.go ([e9a261c](https://github.com/andygrunwald/go-jira/commit/e9a261c52249073345e5895b22e2cf4d7286497a)) + +# [1.10.0](https://github.com/andygrunwald/go-jira/compare/v1.9.0...v1.10.0) (2019-05-23) + + +### Bug Fixes + +* empty SearchOptions causing malformed request ([b3bf8c2](https://github.com/andygrunwald/go-jira/commit/b3bf8c2)) + + +### Features + +* added DeleteAttachment ([e93c0e1](https://github.com/andygrunwald/go-jira/commit/e93c0e1)) + + + +# [1.9.0](https://github.com/andygrunwald/go-jira/compare/v1.8.0...v1.9.0) (2019-05-19) + + +### Features + +* **issues:** Added support for AddWorklog and GetWorklogs 
([1ebd7e7](https://github.com/andygrunwald/go-jira/commit/1ebd7e7))
+
+
+
+# [1.8.0](https://github.com/andygrunwald/go-jira/compare/v1.7.0...v1.8.0) (2019-05-16)
+
+
+### Bug Fixes
+
+* Add PriorityService to the main ([8491cb0](https://github.com/andygrunwald/go-jira/commit/8491cb0))
+
+
+### Features
+
+* **filter:** Add GetFavouriteList to FilterService. ([645898e](https://github.com/andygrunwald/go-jira/commit/645898e))
+* Add get all priorities ([1c63e25](https://github.com/andygrunwald/go-jira/commit/1c63e25))
+* Add ResolutionService to retrieve resolutions ([fb1ce22](https://github.com/andygrunwald/go-jira/commit/fb1ce22))
+* Add status category constants ([6223ddd](https://github.com/andygrunwald/go-jira/commit/6223ddd))
+* Add StatusCategory GetList ([049a756](https://github.com/andygrunwald/go-jira/commit/049a756))
diff --git a/vendor/github.com/andygrunwald/go-jira/LICENSE b/vendor/github.com/andygrunwald/go-jira/LICENSE
new file mode 100644
index 00000000000..692f6bea285
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Andy Grunwald
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/andygrunwald/go-jira/Makefile b/vendor/github.com/andygrunwald/go-jira/Makefile
new file mode 100644
index 00000000000..928c554c801
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/Makefile
@@ -0,0 +1,25 @@
+.DEFAULT_GOAL := help
+
+.PHONY: help
+help: ## Outputs the help.
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: test
+test: ## Runs all unit, integration and example tests.
+	go test -race -v ./...
+
+.PHONY: vet
+vet: ## Runs go vet (to detect suspicious constructs).
+	go vet ./...
+
+.PHONY: fmt
+fmt: ## Runs go fmt (to check for go coding guidelines).
+	gofmt -d -s .
+
+.PHONY: staticcheck
+staticcheck: ## Runs static analysis to prevent bugs and foster code simplicity, performance and editor integration.
+	go get -u honnef.co/go/tools/cmd/staticcheck
+	staticcheck ./...
+
+.PHONY: all
+all: test vet fmt staticcheck ## Runs all source code quality targets (like test, vet, fmt, staticcheck)
diff --git a/vendor/github.com/andygrunwald/go-jira/README.md b/vendor/github.com/andygrunwald/go-jira/README.md
new file mode 100644
index 00000000000..b2b1edec087
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/README.md
@@ -0,0 +1,334 @@
+# go-jira
+
+[![GoDoc](https://godoc.org/github.com/andygrunwald/go-jira?status.svg)](https://godoc.org/github.com/andygrunwald/go-jira)
+[![Build Status](https://github.com/andygrunwald/go-jira/actions/workflows/testing.yml/badge.svg)](https://github.com/andygrunwald/go-jira/actions/workflows/testing.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/andygrunwald/go-jira)](https://goreportcard.com/report/github.com/andygrunwald/go-jira)
+
+[Go](https://golang.org/) client library for [Atlassian Jira](https://www.atlassian.com/software/jira).
+
+![Go client library for Atlassian Jira](./img/logo_small.png "Go client library for Atlassian Jira.")
+
+## Features
+
+* Authentication (HTTP Basic, OAuth, Session Cookie)
+* Create and retrieve issues
+* Create and retrieve issue transitions (status updates)
+* Call every API endpoint of Jira, even those not directly implemented in this library
+
+This package is not Jira API complete (yet), but you can call every API endpoint you want. See [Call a not implemented API endpoint](#call-a-not-implemented-api-endpoint) for how to do this. For all available Jira API endpoints, have a look at the [latest Jira REST API documentation](https://docs.atlassian.com/jira/REST/latest/).
+
+## Requirements
+
+* Go >= 1.14
+* Jira v6.3.4 & v7.1.2.
+
+Note that we also run our tests against Go 1.13, though only the last two versions
+of Go are officially supported.
+
+## Installation
+
+It is go-gettable:
+
+```bash
+go get github.com/andygrunwald/go-jira
+```
+
+For stable versions you can use one of our tags with [gopkg.in](http://labix.org/gopkg.in). E.g.
+
+```go
+package main
+
+import (
+	jira "gopkg.in/andygrunwald/go-jira.v1"
+)
+...
+```
+
+(Optional) To run unit / example tests:
+
+```bash
+cd $GOPATH/src/github.com/andygrunwald/go-jira
+go test -v ./...
+```
+
+## API
+
+Please have a look at the [GoDoc documentation](https://godoc.org/github.com/andygrunwald/go-jira) for a detailed API description.
+
+The [latest Jira REST API documentation](https://docs.atlassian.com/jira/REST/latest/) was the base document for this package.
+
+## Examples
+
+Below are a few examples of how the API can be used.
+A few more examples are available in the [GoDoc examples section](https://godoc.org/github.com/andygrunwald/go-jira#pkg-examples).
+
+### Get a single issue
+
+Let's retrieve [MESOS-3325](https://issues.apache.org/jira/browse/MESOS-3325) from the [Apache Mesos](http://mesos.apache.org/) project.
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-jira"
+)
+
+func main() {
+	jiraClient, _ := jira.NewClient(nil, "https://issues.apache.org/jira/")
+	issue, _, _ := jiraClient.Issue.Get("MESOS-3325", nil)
+
+	fmt.Printf("%s: %+v\n", issue.Key, issue.Fields.Summary)
+	fmt.Printf("Type: %s\n", issue.Fields.Type.Name)
+	fmt.Printf("Priority: %s\n", issue.Fields.Priority.Name)
+
+	// MESOS-3325: Running mesos-slave@0.23 in a container causes slave to be lost after a restart
+	// Type: Bug
+	// Priority: Critical
+}
+```
+
+### Authentication
+
+The `go-jira` library does not handle most authentication directly. Instead, authentication should be handled within an `http.Client`. That client can then be passed into the `NewClient` function when creating a Jira client.
+
+For convenience, capability for basic and cookie-based authentication is included in the main library.
+
+#### Token (Jira on Atlassian Cloud)
+
+Token-based authentication uses the basic authentication scheme, with a user-generated API token in place of a user's password. You can generate a token for your user [here](https://id.atlassian.com/manage-profile/security/api-tokens). Additional information about Atlassian Cloud API tokens can be found [here](https://confluence.atlassian.com/cloud/api-tokens-938839638.html).
+
+A more thorough, [runnable example](examples/basicauth/main.go) is provided in the examples directory.
+
+```go
+func main() {
+	tp := jira.BasicAuthTransport{
+		Username: "username",
+		Password: "token",
+	}
+
+	client, err := jira.NewClient(tp.Client(), "https://my.jira.com")
+	if err != nil {
+		panic(err)
+	}
+
+	u, _, err := client.User.Get("some_user")
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("\nEmail: %v\nSuccess!\n", u.EmailAddress)
+}
+```
+
+#### Basic (self-hosted Jira)
+
+Password-based API authentication works for self-hosted Jira **only**, and has been [deprecated for users of Atlassian Cloud](https://developer.atlassian.com/cloud/jira/platform/deprecation-notice-basic-auth-and-cookie-based-auth/).
+
+The above token authentication example may be used, substituting a user's password for a generated token.
+
+#### Authenticate with OAuth
+
+If you want to connect via OAuth to your Jira Cloud instance, check out the [example of using OAuth authentication with Jira in Go](https://gist.github.com/Lupus/edafe9a7c5c6b13407293d795442fe67) by [@Lupus](https://github.com/Lupus).
+
+For more details, have a look at [issue #56](https://github.com/andygrunwald/go-jira/issues/56).
+
+### Create an issue
+
+An example of how to create an issue:
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-jira"
+)
+
+func main() {
+	base := "https://my.jira.com"
+	tp := jira.BasicAuthTransport{
+		Username: "username",
+		Password: "token",
+	}
+
+	jiraClient, err := jira.NewClient(tp.Client(), base)
+	if err != nil {
+		panic(err)
+	}
+
+	i := jira.Issue{
+		Fields: &jira.IssueFields{
+			Assignee: &jira.User{
+				Name: "myuser",
+			},
+			Reporter: &jira.User{
+				Name: "youruser",
+			},
+			Description: "Test Issue",
+			Type: jira.IssueType{
+				Name: "Bug",
+			},
+			Project: jira.Project{
+				Key: "PROJ1",
+			},
+			Summary: "Just a demo issue",
+		},
+	}
+	issue, _, err := jiraClient.Issue.Create(&i)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%s: %+v\n", issue.Key, issue.Fields.Summary)
+}
+```
+
+### Change an issue status
+
+This is how one can change an issue status. In this example, we change the issue from "To Do" to "In Progress."
+The example assumes an issue with the key `FART-1` exists and offers a transition named "In Progress".
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-jira"
+)
+
+func main() {
+	base := "https://my.jira.com"
+	tp := jira.BasicAuthTransport{
+		Username: "username",
+		Password: "token",
+	}
+
+	jiraClient, err := jira.NewClient(tp.Client(), base)
+	if err != nil {
+		panic(err)
+	}
+
+	issue, _, _ := jiraClient.Issue.Get("FART-1", nil)
+	currentStatus := issue.Fields.Status.Name
+	fmt.Printf("Current status: %s\n", currentStatus)
+
+	var transitionID string
+	possibleTransitions, _, _ := jiraClient.Issue.GetTransitions("FART-1")
+	for _, v := range possibleTransitions {
+		if v.Name == "In Progress" {
+			transitionID = v.ID
+			break
+		}
+	}
+
+	jiraClient.Issue.DoTransition("FART-1", transitionID)
+	issue, _, _ = jiraClient.Issue.Get("FART-1", nil)
+	fmt.Printf("Status after transition: %+v\n", issue.Fields.Status.Name)
+}
+```
+
+### Get all the issues for a JQL query with pagination
+
+The Jira API limits the number of results (`maxResults`) it returns per request. If you need every issue matching a given JQL query, you have to page through the results. For a reference implementation of a `GetAllIssues` function that paginates over the Jira API, see the [Pagination Example](https://github.com/andygrunwald/go-jira/blob/master/examples/pagination/main.go).
+
+### Call a not implemented API endpoint
+
+Not all API endpoints of the Jira API are implemented in *go-jira*, but you can call them anyway.
+Let's get all public projects of [Atlassian's Jira instance](https://jira.atlassian.com/).
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/andygrunwald/go-jira"
+)
+
+func main() {
+	base := "https://my.jira.com"
+	tp := jira.BasicAuthTransport{
+		Username: "username",
+		Password: "token",
+	}
+
+	jiraClient, err := jira.NewClient(tp.Client(), base)
+	if err != nil {
+		panic(err)
+	}
+
+	req, _ := jiraClient.NewRequest("GET", "rest/api/2/project", nil)
+
+	projects := new([]jira.Project)
+	_, err = jiraClient.Do(req, projects)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, project := range *projects {
+		fmt.Printf("%s: %s\n", project.Key, project.Name)
+	}
+
+	// ...
+	// BAM: Bamboo
+	// BAMJ: Bamboo Jira Plugin
+	// CLOV: Clover
+	// CONF: Confluence
+	// ...
+}
+```
+
+## Implementations
+
+* [andygrunwald/jitic](https://github.com/andygrunwald/jitic) - The Jira Ticket Checker
+
+## Code structure
+
+The code structure of this package was inspired by [google/go-github](https://github.com/google/go-github).
+
+There is one main part (the client).
+Based on this main client, the other endpoints, such as Issues or Authentication, are split out into services, e.g. `IssueService` or `AuthenticationService`.
+Each service is responsible for a single endpoint / use case of Jira.
+
+## Contribution
+
+We ❤️ PRs!
+
+Contributions of any kind are highly welcome!
+It doesn't matter if you are not able to write code.
+Creating issues, holding talks, and helping other people to use [go-jira](https://github.com/andygrunwald/go-jira) are contributions, too!
+A few examples:
+
+* Correct typos in the README / documentation
+* Reporting bugs
+* Implement a new feature or endpoint
+* Sharing the love of [go-jira](https://github.com/andygrunwald/go-jira) and helping people get used to it
+
+If you are new to pull requests, check out [Collaborating on projects using issues and pull requests / Creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+### Dependency management
+
+`go-jira` uses `go modules` for dependency management. After cloning the repo, it's easy to make sure you have the correct dependencies by running `go mod tidy`.
+
+For adding new dependencies, updating dependencies, and other operations, the [Daily workflow](https://github.com/golang/go/wiki/Modules#daily-workflow) is a good place to start.
+
+### Sandbox environment for testing
+
+Jira offers sandbox test environments at http://go.atlassian.com/cloud-dev.
+
+You can read more about them at https://developer.atlassian.com/blog/2016/04/cloud-ecosystem-dev-env/.
+
+## Releasing
+
+Install [standard-version](https://github.com/conventional-changelog/standard-version)
+```bash
+npm i -g standard-version
+```
+
+```bash
+standard-version
+git push --tags
+```
+
+Manually copy/paste text from the changelog (for this new version) into the release on GitHub.com. E.g.
+
+[https://github.com/andygrunwald/go-jira/releases/edit/v1.11.0](https://github.com/andygrunwald/go-jira/releases/edit/v1.11.0)
+
+## License
+
+This project is released under the terms of the [MIT license](http://en.wikipedia.org/wiki/MIT_License).
diff --git a/vendor/github.com/andygrunwald/go-jira/authentication.go b/vendor/github.com/andygrunwald/go-jira/authentication.go
new file mode 100644
index 00000000000..bd123b906f1
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/authentication.go
@@ -0,0 +1,208 @@
+package jira
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+const (
+	// HTTP Basic Authentication
+	authTypeBasic = 1
+	// HTTP Session Authentication
+	authTypeSession = 2
+)
+
+// AuthenticationService handles authentication for the Jira instance / API.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#authentication
+type AuthenticationService struct {
+	client *Client
+
+	// Authentication type
+	authType int
+
+	// Basic auth username
+	username string
+
+	// Basic auth password
+	password string
+}
+
+// Session represents a Session JSON response by the Jira API.
+type Session struct {
+	Self    string `json:"self,omitempty"`
+	Name    string `json:"name,omitempty"`
+	Session struct {
+		Name  string `json:"name"`
+		Value string `json:"value"`
+	} `json:"session,omitempty"`
+	LoginInfo struct {
+		FailedLoginCount    int    `json:"failedLoginCount"`
+		LoginCount          int    `json:"loginCount"`
+		LastFailedLoginTime string `json:"lastFailedLoginTime"`
+		PreviousLoginTime   string `json:"previousLoginTime"`
+	} `json:"loginInfo"`
+	Cookies []*http.Cookie
+}
+
+// AcquireSessionCookieWithContext creates a new session for a user in Jira.
+// Once a session has been successfully created it can be used to access any of Jira's remote APIs and also the web UI by passing the appropriate HTTP Cookie header.
+// The header will be automatically applied to every API request.
+// Note that it is generally preferable to use HTTP BASIC authentication with the REST API.
+// However, this resource may be used to mimic the behaviour of Jira's log-in page (e.g. to display log-in errors to a user).
+// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#auth/1/session +// +// Deprecated: Use CookieAuthTransport instead +func (s *AuthenticationService) AcquireSessionCookieWithContext(ctx context.Context, username, password string) (bool, error) { + apiEndpoint := "rest/auth/1/session" + body := struct { + Username string `json:"username"` + Password string `json:"password"` + }{ + username, + password, + } + + req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, body) + if err != nil { + return false, err + } + + session := new(Session) + resp, err := s.client.Do(req, session) + + if resp != nil { + session.Cookies = resp.Cookies() + } + + if err != nil { + return false, fmt.Errorf("auth at Jira instance failed (HTTP(S) request). %s", err) + } + if resp != nil && resp.StatusCode != 200 { + return false, fmt.Errorf("auth at Jira instance failed (HTTP(S) request). Status code: %d", resp.StatusCode) + } + + s.client.session = session + s.authType = authTypeSession + + return true, nil +} + +// AcquireSessionCookie wraps AcquireSessionCookieWithContext using the background context. +// +// Deprecated: Use CookieAuthTransport instead +func (s *AuthenticationService) AcquireSessionCookie(username, password string) (bool, error) { + return s.AcquireSessionCookieWithContext(context.Background(), username, password) +} + +// SetBasicAuth sets username and password for the basic auth against the Jira instance. +// +// Deprecated: Use BasicAuthTransport instead +func (s *AuthenticationService) SetBasicAuth(username, password string) { + s.username = username + s.password = password + s.authType = authTypeBasic +} + +// Authenticated reports if the current Client has authentication details for Jira +func (s *AuthenticationService) Authenticated() bool { + if s != nil { + if s.authType == authTypeSession { + return s.client.session != nil + } else if s.authType == authTypeBasic { + return s.username != "" + } + + } + return false +} + +// LogoutWithContext logs out the current user that has been authenticated and the session in the client is destroyed. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#auth/1/session +// +// Deprecated: Use CookieAuthTransport to create base client. Logging out is as simple as not using the +// client anymore +func (s *AuthenticationService) LogoutWithContext(ctx context.Context) error { + if s.authType != authTypeSession || s.client.session == nil { + return fmt.Errorf("no user is authenticated") + } + + apiEndpoint := "rest/auth/1/session" + req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil) + if err != nil { + return fmt.Errorf("creating the request to log the user out failed : %s", err) + } + + resp, err := s.client.Do(req, nil) + if err != nil { + return fmt.Errorf("error sending the logout request: %s", err) + } + if resp.StatusCode != 204 { + return fmt.Errorf("the logout was unsuccessful with status %d", resp.StatusCode) + } + + // If logout successful, delete session + s.client.session = nil + + return nil + +} + +// Logout wraps LogoutWithContext using the background context. +// +// Deprecated: Use CookieAuthTransport to create base client. Logging out is as simple as not using the +// client anymore +func (s *AuthenticationService) Logout() error { + return s.LogoutWithContext(context.Background()) +} + +// GetCurrentUserWithContext gets the details of the current user. 
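+//
+// A caller-side sketch (assumes a client that has already acquired a session cookie):
+//
+//	session, err := client.Authentication.GetCurrentUser()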
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#auth/1/session
+func (s *AuthenticationService) GetCurrentUserWithContext(ctx context.Context) (*Session, error) {
+	if s == nil {
+		return nil, fmt.Errorf("authentication service is not instantiated")
+	}
+	if s.authType != authTypeSession || s.client.session == nil {
+		return nil, fmt.Errorf("no user is authenticated yet")
+	}
+
+	apiEndpoint := "rest/auth/1/session"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, fmt.Errorf("could not create request for getting user info: %s", err)
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return nil, fmt.Errorf("error sending request to get user info: %s", err)
+	}
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("getting user info failed with status: %d", resp.StatusCode)
+	}
+
+	defer resp.Body.Close()
+	ret := new(Session)
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't read body from the response: %s", err)
+	}
+
+	err = json.Unmarshal(data, ret)
+	if err != nil {
+		return nil, fmt.Errorf("could not unmarshal received user info: %s", err)
+	}
+
+	return ret, nil
+}
+
+// GetCurrentUser wraps GetCurrentUserWithContext using the background context.
+func (s *AuthenticationService) GetCurrentUser() (*Session, error) {
+	return s.GetCurrentUserWithContext(context.Background())
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/board.go b/vendor/github.com/andygrunwald/go-jira/board.go
new file mode 100644
index 00000000000..27516872732
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/board.go
@@ -0,0 +1,311 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// BoardService handles Agile Boards for the Jira instance / API.
+//
+// Jira API docs: https://docs.atlassian.com/jira-software/REST/server/
+type BoardService struct {
+	client *Client
+}
+
+// BoardsList reflects a list of agile boards
+type BoardsList struct {
+	MaxResults int     `json:"maxResults" structs:"maxResults"`
+	StartAt    int     `json:"startAt" structs:"startAt"`
+	Total      int     `json:"total" structs:"total"`
+	IsLast     bool    `json:"isLast" structs:"isLast"`
+	Values     []Board `json:"values" structs:"values"`
+}
+
+// Board represents a Jira agile board
+type Board struct {
+	ID       int    `json:"id,omitempty" structs:"id,omitempty"`
+	Self     string `json:"self,omitempty" structs:"self,omitempty"`
+	Name     string `json:"name,omitempty" structs:"name,omitempty"`
+	Type     string `json:"type,omitempty" structs:"type,omitempty"`
+	FilterID int    `json:"filterId,omitempty" structs:"filterId,omitempty"`
+}
+
+// BoardListOptions specifies the optional parameters to the BoardService.GetList
+type BoardListOptions struct {
+	// BoardType filters results to boards of the specified type.
+	// Valid values: scrum, kanban.
+	BoardType string `url:"type,omitempty"`
+	// Name filters results to boards that match or partially match the specified name.
+	Name string `url:"name,omitempty"`
+	// ProjectKeyOrID filters results to boards that are relevant to a project.
+	// Relevance means that the JQL filter defined in the board contains a reference to the project.
+	ProjectKeyOrID string `url:"projectKeyOrId,omitempty"`
+
+	SearchOptions
+}
+
+// GetAllSprintsOptions specifies the optional parameters to the BoardService.GetList
+type GetAllSprintsOptions struct {
+	// State filters results to sprints in the specified states, as a comma-separated list.
+	State string `url:"state,omitempty"`
+
+	SearchOptions
+}
+
+// SprintsList reflects a list of agile sprints
+type SprintsList struct {
+	MaxResults int      `json:"maxResults" structs:"maxResults"`
+	StartAt    int      `json:"startAt" structs:"startAt"`
+	Total      int      `json:"total" structs:"total"`
+	IsLast     bool     `json:"isLast" structs:"isLast"`
+	Values     []Sprint `json:"values" structs:"values"`
+}
+
+// Sprint represents a sprint on a Jira agile board
+type Sprint struct {
+	ID            int        `json:"id" structs:"id"`
+	Name          string     `json:"name" structs:"name"`
+	CompleteDate  *time.Time `json:"completeDate" structs:"completeDate"`
+	EndDate       *time.Time `json:"endDate" structs:"endDate"`
+	StartDate     *time.Time `json:"startDate" structs:"startDate"`
+	OriginBoardID int        `json:"originBoardId" structs:"originBoardId"`
+	Self          string     `json:"self" structs:"self"`
+	State         string     `json:"state" structs:"state"`
+}
+
+// BoardConfiguration represents the configuration of a Jira board
+type BoardConfiguration struct {
+	ID           int                            `json:"id"`
+	Name         string                         `json:"name"`
+	Self         string                         `json:"self"`
+	Location     BoardConfigurationLocation     `json:"location"`
+	Filter       BoardConfigurationFilter       `json:"filter"`
+	SubQuery     BoardConfigurationSubQuery     `json:"subQuery"`
+	ColumnConfig BoardConfigurationColumnConfig `json:"columnConfig"`
+}
+
+// BoardConfigurationFilter is a reference to the filter used by the given board.
+type BoardConfigurationFilter struct {
+	ID   string `json:"id"`
+	Self string `json:"self"`
+}
+
+// BoardConfigurationSubQuery (Kanban only) - JQL subquery used by the given board.
+type BoardConfigurationSubQuery struct {
+	Query string `json:"query"`
+}
+
+// BoardConfigurationLocation is a reference to the container the board is located in
+type BoardConfigurationLocation struct {
+	Type string `json:"type"`
+	Key  string `json:"key"`
+	ID   string `json:"id"`
+	Self string `json:"self"`
+	Name string `json:"name"`
+}
+
+// BoardConfigurationColumnConfig lists the columns for a given board in the order defined in the column configuration,
+// with constraint type (none, issueCount, issueCountExclSubs)
+type BoardConfigurationColumnConfig struct {
+	Columns        []BoardConfigurationColumn `json:"columns"`
+	ConstraintType string                     `json:"constraintType"`
+}
+
+// BoardConfigurationColumn lists the name of a column together with the statuses that map to it
+type BoardConfigurationColumn struct {
+	Name   string                           `json:"name"`
+	Status []BoardConfigurationColumnStatus `json:"statuses"`
+}
+
+// BoardConfigurationColumnStatus represents a status in the column configuration
+type BoardConfigurationColumnStatus struct {
+	ID   string `json:"id"`
+	Self string `json:"self"`
+}
+
+// GetAllBoardsWithContext will return all boards. This only includes boards that the user has permission to view.
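+//
+// A caller-side sketch (the filter values are illustrative):
+//
+//	boards, _, err := client.Board.GetAllBoards(&jira.BoardListOptions{BoardType: "scrum"})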
+//
+// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-getAllBoards
+func (s *BoardService) GetAllBoardsWithContext(ctx context.Context, opt *BoardListOptions) (*BoardsList, *Response, error) {
+	apiEndpoint := "rest/agile/1.0/board"
+	url, err := addOptions(apiEndpoint, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+	req, err := s.client.NewRequestWithContext(ctx, "GET", url, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	boards := new(BoardsList)
+	resp, err := s.client.Do(req, boards)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return boards, resp, err
+}
+
+// GetAllBoards wraps GetAllBoardsWithContext using the background context.
+func (s *BoardService) GetAllBoards(opt *BoardListOptions) (*BoardsList, *Response, error) {
+	return s.GetAllBoardsWithContext(context.Background(), opt)
+}
+
+// GetBoardWithContext will return the board for the given boardID.
+// This board will only be returned if the user has permission to view it.
+//
+// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-getBoard
+func (s *BoardService) GetBoardWithContext(ctx context.Context, boardID int) (*Board, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%v", boardID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	board := new(Board)
+	resp, err := s.client.Do(req, board)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return board, resp, nil
+}
+
+// GetBoard wraps GetBoardWithContext using the background context.
+func (s *BoardService) GetBoard(boardID int) (*Board, *Response, error) {
+	return s.GetBoardWithContext(context.Background(), boardID)
+}
+
+// CreateBoardWithContext creates a new board. Board name, type and filter ID are required.
+// name - Must be less than 255 characters.
+// type - Valid values: scrum, kanban
+// filterId - ID of a filter that the user has permissions to view.
+// Note, if the user does not have the 'Create shared objects' permission and tries to create a shared board, a private
+// board will be created instead (remember that board sharing depends on the filter sharing).
+//
+// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-createBoard
+func (s *BoardService) CreateBoardWithContext(ctx context.Context, board *Board) (*Board, *Response, error) {
+	apiEndpoint := "rest/agile/1.0/board"
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, board)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	responseBoard := new(Board)
+	resp, err := s.client.Do(req, responseBoard)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return responseBoard, resp, nil
+}
+
+// CreateBoard wraps CreateBoardWithContext using the background context.
+func (s *BoardService) CreateBoard(board *Board) (*Board, *Response, error) {
+	return s.CreateBoardWithContext(context.Background(), board)
+}
+
+// DeleteBoardWithContext will delete an agile board.
+// +// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-deleteBoard +func (s *BoardService) DeleteBoardWithContext(ctx context.Context, boardID int) (*Board, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%v", boardID) + req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + err = NewJiraError(resp, err) + } + return nil, resp, err +} + +// DeleteBoard wraps DeleteBoardWithContext using the background context. +func (s *BoardService) DeleteBoard(boardID int) (*Board, *Response, error) { + return s.DeleteBoardWithContext(context.Background(), boardID) +} + +// GetAllSprintsWithContext will return all sprints from a board, for a given board Id. +// This only includes sprints that the user has permission to view. +// +// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board/{boardId}/sprint +func (s *BoardService) GetAllSprintsWithContext(ctx context.Context, boardID string) ([]Sprint, *Response, error) { + id, err := strconv.Atoi(boardID) + if err != nil { + return nil, nil, err + } + + result, response, err := s.GetAllSprintsWithOptions(id, &GetAllSprintsOptions{}) + if err != nil { + return nil, nil, err + } + + return result.Values, response, nil +} + +// GetAllSprints wraps GetAllSprintsWithContext using the background context. +func (s *BoardService) GetAllSprints(boardID string) ([]Sprint, *Response, error) { + return s.GetAllSprintsWithContext(context.Background(), boardID) +} + +// GetAllSprintsWithOptionsWithContext will return sprints from a board, for a given board Id and filtering options +// This only includes sprints that the user has permission to view. +// +// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board/{boardId}/sprint +func (s *BoardService) GetAllSprintsWithOptionsWithContext(ctx context.Context, boardID int, options *GetAllSprintsOptions) (*SprintsList, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%d/sprint", boardID) + url, err := addOptions(apiEndpoint, options) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, nil, err + } + + result := new(SprintsList) + resp, err := s.client.Do(req, result) + if err != nil { + err = NewJiraError(resp, err) + } + + return result, resp, err +} + +// GetAllSprintsWithOptions wraps GetAllSprintsWithOptionsWithContext using the background context. 
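+//
+// A caller-side sketch (the board ID and state filter are illustrative):
+//
+//	sprints, _, err := client.Board.GetAllSprintsWithOptions(123, &jira.GetAllSprintsOptions{State: "active"})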
+func (s *BoardService) GetAllSprintsWithOptions(boardID int, options *GetAllSprintsOptions) (*SprintsList, *Response, error) {
+	return s.GetAllSprintsWithOptionsWithContext(context.Background(), boardID, options)
+}
+
+// GetBoardConfigurationWithContext will return a board configuration for a given board ID.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/software/rest/#api-rest-agile-1-0-board-boardId-configuration-get
+func (s *BoardService) GetBoardConfigurationWithContext(ctx context.Context, boardID int) (*BoardConfiguration, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%d/configuration", boardID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	result := new(BoardConfiguration)
+	resp, err := s.client.Do(req, result)
+	if err != nil {
+		err = NewJiraError(resp, err)
+	}
+
+	return result, resp, err
+}
+
+// GetBoardConfiguration wraps GetBoardConfigurationWithContext using the background context.
+func (s *BoardService) GetBoardConfiguration(boardID int) (*BoardConfiguration, *Response, error) {
+	return s.GetBoardConfigurationWithContext(context.Background(), boardID)
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/component.go b/vendor/github.com/andygrunwald/go-jira/component.go
new file mode 100644
index 00000000000..b76fe0cf012
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/component.go
@@ -0,0 +1,44 @@
+package jira
+
+import "context"
+
+// ComponentService handles components for the Jira instance / API.
+//
+// Jira API docs: https://docs.atlassian.com/software/jira/docs/api/REST/7.10.1/#api/2/component
+type ComponentService struct {
+	client *Client
+}
+
+// CreateComponentOptions are passed to the ComponentService.Create function to create a new Jira component
+type CreateComponentOptions struct {
+	Name         string `json:"name,omitempty" structs:"name,omitempty"`
+	Description  string `json:"description,omitempty" structs:"description,omitempty"`
+	Lead         *User  `json:"lead,omitempty" structs:"lead,omitempty"`
+	LeadUserName string `json:"leadUserName,omitempty" structs:"leadUserName,omitempty"`
+	AssigneeType string `json:"assigneeType,omitempty" structs:"assigneeType,omitempty"`
+	Assignee     *User  `json:"assignee,omitempty" structs:"assignee,omitempty"`
+	Project      string `json:"project,omitempty" structs:"project,omitempty"`
+	ProjectID    int    `json:"projectId,omitempty" structs:"projectId,omitempty"`
+}
+
+// CreateWithContext creates a new Jira component based on the given options.
+func (s *ComponentService) CreateWithContext(ctx context.Context, options *CreateComponentOptions) (*ProjectComponent, *Response, error) {
+	apiEndpoint := "rest/api/2/component"
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	component := new(ProjectComponent)
+	resp, err := s.client.Do(req, component)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+
+	return component, resp, nil
+}
+
+// Create wraps CreateWithContext using the background context.
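+//
+// A caller-side sketch (the component name and project ID are illustrative):
+//
+//	component, _, err := client.Component.Create(&jira.CreateComponentOptions{Name: "backend", ProjectID: 10001})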
+func (s *ComponentService) Create(options *CreateComponentOptions) (*ProjectComponent, *Response, error) { + return s.CreateWithContext(context.Background(), options) +} diff --git a/vendor/github.com/andygrunwald/go-jira/error.go b/vendor/github.com/andygrunwald/go-jira/error.go new file mode 100644 index 00000000000..c7bc2e58e5e --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/error.go @@ -0,0 +1,90 @@ +package jira + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + "github.com/pkg/errors" +) + +// Error message from Jira +// See https://docs.atlassian.com/jira/REST/cloud/#error-responses +type Error struct { + HTTPError error + ErrorMessages []string `json:"errorMessages"` + Errors map[string]string `json:"errors"` +} + +// NewJiraError creates a new jira Error +func NewJiraError(resp *Response, httpError error) error { + if resp == nil { + return errors.Wrap(httpError, "No response returned") + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return errors.Wrap(err, httpError.Error()) + } + jerr := Error{HTTPError: httpError} + contentType := resp.Header.Get("Content-Type") + if strings.HasPrefix(contentType, "application/json") { + err = json.Unmarshal(body, &jerr) + if err != nil { + httpError = errors.Wrap(errors.New("could not parse JSON"), httpError.Error()) + return errors.Wrap(err, httpError.Error()) + } + } else { + if httpError == nil { + return fmt.Errorf("got response status %s:%s", resp.Status, string(body)) + } + return errors.Wrap(httpError, fmt.Sprintf("%s: %s", resp.Status, string(body))) + } + + return &jerr +} + +// Error is a short string representing the error +func (e *Error) Error() string { + if len(e.ErrorMessages) > 0 { + // return fmt.Sprintf("%v", e.HTTPError) + return fmt.Sprintf("%s: %v", e.ErrorMessages[0], e.HTTPError) + } + if len(e.Errors) > 0 { + for key, value := range e.Errors { + return fmt.Sprintf("%s - %s: %v", key, value, e.HTTPError) + } + } + return e.HTTPError.Error() +} + +// LongError is a full representation of the error as a string +func (e *Error) LongError() string { + var msg bytes.Buffer + if e.HTTPError != nil { + msg.WriteString("Original:\n") + msg.WriteString(e.HTTPError.Error()) + msg.WriteString("\n") + } + if len(e.ErrorMessages) > 0 { + msg.WriteString("Messages:\n") + for _, v := range e.ErrorMessages { + msg.WriteString(" - ") + msg.WriteString(v) + msg.WriteString("\n") + } + } + if len(e.Errors) > 0 { + for key, value := range e.Errors { + msg.WriteString(" - ") + msg.WriteString(key) + msg.WriteString(" - ") + msg.WriteString(value) + msg.WriteString("\n") + } + } + return msg.String() +} diff --git a/vendor/github.com/andygrunwald/go-jira/field.go b/vendor/github.com/andygrunwald/go-jira/field.go new file mode 100644 index 00000000000..b14057d9326 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/field.go @@ -0,0 +1,55 @@ +package jira + +import "context" + +// FieldService handles fields for the Jira instance / API. +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-Field +type FieldService struct { + client *Client +} + +// Field represents a field of a Jira issue. 
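Every service method in this vendored client funnels failures through `NewJiraError`, so callers can type-assert the returned error to `*jira.Error` for the structured detail. A small helper sketch, using nothing beyond what error.go above defines:

```go
package main

import (
	"fmt"

	jira "github.com/andygrunwald/go-jira"
)

// describeErr prints the structured Jira error detail when available,
// falling back to the plain error string otherwise.
func describeErr(err error) {
	if jerr, ok := err.(*jira.Error); ok {
		fmt.Println(jerr.LongError()) // HTTP error plus every Jira-side message
		return
	}
	fmt.Println(err)
}
```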
+type Field struct { + ID          string      `json:"id,omitempty" structs:"id,omitempty"` + Key         string      `json:"key,omitempty" structs:"key,omitempty"` + Name        string      `json:"name,omitempty" structs:"name,omitempty"` + Custom      bool        `json:"custom,omitempty" structs:"custom,omitempty"` + Navigable   bool        `json:"navigable,omitempty" structs:"navigable,omitempty"` + Searchable  bool        `json:"searchable,omitempty" structs:"searchable,omitempty"` + ClauseNames []string    `json:"clauseNames,omitempty" structs:"clauseNames,omitempty"` + Schema      FieldSchema `json:"schema,omitempty" structs:"schema,omitempty"` +} + +// FieldSchema represents a schema of a Jira field. +// Documentation: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-fields/#api-rest-api-2-field-get +type FieldSchema struct { + Type     string `json:"type,omitempty" structs:"type,omitempty"` + Items    string `json:"items,omitempty" structs:"items,omitempty"` + Custom   string `json:"custom,omitempty" structs:"custom,omitempty"` + System   string `json:"system,omitempty" structs:"system,omitempty"` + CustomID int64  `json:"customId,omitempty" structs:"customId,omitempty"` +} + +// GetListWithContext gets all fields from Jira +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-field-get +func (s *FieldService) GetListWithContext(ctx context.Context) ([]Field, *Response, error) { + apiEndpoint := "rest/api/2/field" + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + fieldList := []Field{} + resp, err := s.client.Do(req, &fieldList) + if err != nil { + return nil, resp, NewJiraError(resp, err) + } + return fieldList, resp, nil +} + +// GetList wraps GetListWithContext using the background context. +func (s *FieldService) GetList() ([]Field, *Response, error) { + return s.GetListWithContext(context.Background()) +} diff --git a/vendor/github.com/andygrunwald/go-jira/filter.go b/vendor/github.com/andygrunwald/go-jira/filter.go new file mode 100644 index 00000000000..f40f3a58c86 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/filter.go @@ -0,0 +1,251 @@ +package jira + +import ( + "context" + "fmt" + + "github.com/google/go-querystring/query" +) + +// FilterService handles filters for the Jira instance / API.
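Before moving on through filter.go, a quick usage sketch for the FieldService above: building a name-to-ID map of custom fields, a common first step before reading or writing `customfield_*` values on issues. The `Field` accessor on `*jira.Client` is assumed from the rest of the package:

```go
package main

import jira "github.com/andygrunwald/go-jira"

// customFieldIDs maps custom field names to their customfield_* IDs.
func customFieldIDs(client *jira.Client) (map[string]string, error) {
	fields, _, err := client.Field.GetList()
	if err != nil {
		return nil, err
	}
	ids := make(map[string]string, len(fields))
	for _, f := range fields {
		if f.Custom {
			ids[f.Name] = f.ID
		}
	}
	return ids, nil
}
```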
+// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-group-Filter +type FilterService struct { + client *Client +} + +// Filter represents a Filter in Jira +type Filter struct { + Self             string        `json:"self"` + ID               string        `json:"id"` + Name             string        `json:"name"` + Description      string        `json:"description"` + Owner            User          `json:"owner"` + Jql              string        `json:"jql"` + ViewURL          string        `json:"viewUrl"` + SearchURL        string        `json:"searchUrl"` + Favourite        bool          `json:"favourite"` + FavouritedCount  int           `json:"favouritedCount"` + SharePermissions []interface{} `json:"sharePermissions"` + Subscriptions    struct { + Size       int           `json:"size"` + Items      []interface{} `json:"items"` + MaxResults int           `json:"max-results"` + StartIndex int           `json:"start-index"` + EndIndex   int           `json:"end-index"` + } `json:"subscriptions"` +} + +// GetMyFiltersQueryOptions specifies the optional parameters for the Get My Filters method +type GetMyFiltersQueryOptions struct { + IncludeFavourites bool   `url:"includeFavourites,omitempty"` + Expand            string `url:"expand,omitempty"` +} + +// FiltersList reflects a list of filters +type FiltersList struct { + MaxResults int               `json:"maxResults" structs:"maxResults"` + StartAt    int               `json:"startAt" structs:"startAt"` + Total      int               `json:"total" structs:"total"` + IsLast     bool              `json:"isLast" structs:"isLast"` + Values     []FiltersListItem `json:"values" structs:"values"` +} + +// FiltersListItem represents a Filter of FiltersList in Jira +type FiltersListItem struct { + Self             string        `json:"self"` + ID               string        `json:"id"` + Name             string        `json:"name"` + Description      string        `json:"description"` + Owner            User          `json:"owner"` + Jql              string        `json:"jql"` + ViewURL          string        `json:"viewUrl"` + SearchURL        string        `json:"searchUrl"` + Favourite        bool          `json:"favourite"` + FavouritedCount  int           `json:"favouritedCount"` + SharePermissions []interface{} `json:"sharePermissions"` + Subscriptions    []struct { + ID   int  `json:"id"` + User User `json:"user"` + } `json:"subscriptions"` +} + +// FilterSearchOptions specifies the optional parameters for the Search method +// https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-filter-search-get +type FilterSearchOptions struct { + // String used to perform a case-insensitive partial match with name. + FilterName string `url:"filterName,omitempty"` + + // User account ID used to return filters with the matching owner.accountId. This parameter cannot be used with owner. + AccountID string `url:"accountId,omitempty"` + + // Group name used to return filters that are shared with a group that matches sharePermissions.group.groupname. + GroupName string `url:"groupname,omitempty"` + + // Project ID used to return filters that are shared with a project that matches sharePermissions.project.id. + // Format: int64 + ProjectID int64 `url:"projectId,omitempty"` + + // Orders the results using one of these filter properties. + // - `description` Orders by filter `description`. Note that this ordering works independently of whether the expand to display the description field is in use. + // - `favourite_count` Orders by `favouritedCount`. + // - `is_favourite` Orders by `favourite`. + // - `id` Orders by filter `id`. + // - `name` Orders by filter `name`. + // - `owner` Orders by `owner.accountId`.
+ // + // Default: `name` + // + // Valid values: id, name, description, owner, favorite_count, is_favorite, -id, -name, -description, -owner, -favorite_count, -is_favorite + OrderBy string `url:"orderBy,omitempty"` + + // The index of the first item to return in a page of results (page offset). + // Default: 0, Format: int64 + StartAt int64 `url:"startAt,omitempty"` + + // The maximum number of items to return per page. The maximum is 100. + // Default: 50, Format: int32 + MaxResults int32 `url:"maxResults,omitempty"` + + // Use expand to include additional information about filter in the response. This parameter accepts multiple values separated by a comma: + // - description Returns the description of the filter. + // - favourite Returns an indicator of whether the user has set the filter as a favorite. + // - favouritedCount Returns a count of how many users have set this filter as a favorite. + // - jql Returns the JQL query that the filter uses. + // - owner Returns the owner of the filter. + // - searchUrl Returns a URL to perform the filter's JQL query. + // - sharePermissions Returns the share permissions defined for the filter. + // - subscriptions Returns the users that are subscribed to the filter. + // - viewUrl Returns a URL to view the filter. + Expand string `url:"expand,omitempty"` +} + +// GetListWithContext retrieves all filters from Jira +func (fs *FilterService) GetListWithContext(ctx context.Context) ([]*Filter, *Response, error) { + + options := &GetQueryOptions{} + apiEndpoint := "rest/api/2/filter" + req, err := fs.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + q, err := query.Values(options) + if err != nil { + return nil, nil, err + } + req.URL.RawQuery = q.Encode() + + filters := []*Filter{} + resp, err := fs.client.Do(req, &filters) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + return filters, resp, err +} + +// GetList wraps GetListWithContext using the background context. +func (fs *FilterService) GetList() ([]*Filter, *Response, error) { + return fs.GetListWithContext(context.Background()) +} + +// GetFavouriteListWithContext retrieves the user's favourited filters from Jira +func (fs *FilterService) GetFavouriteListWithContext(ctx context.Context) ([]*Filter, *Response, error) { + apiEndpoint := "rest/api/2/filter/favourite" + req, err := fs.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + filters := []*Filter{} + resp, err := fs.client.Do(req, &filters) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + return filters, resp, err +} + +// GetFavouriteList wraps GetFavouriteListWithContext using the background context. +func (fs *FilterService) GetFavouriteList() ([]*Filter, *Response, error) { + return fs.GetFavouriteListWithContext(context.Background()) +} + +// GetWithContext retrieves a single Filter from Jira +func (fs *FilterService) GetWithContext(ctx context.Context, filterID int) (*Filter, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/filter/%d", filterID) + req, err := fs.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + filter := new(Filter) + resp, err := fs.client.Do(req, filter) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return filter, resp, err +} + +// Get wraps GetWithContext using the background context. 
+func (fs *FilterService) Get(filterID int) (*Filter, *Response, error) { + return fs.GetWithContext(context.Background(), filterID) +} + +// GetMyFiltersWithContext retrieves the calling user's filters. +// +// https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-filter-my-get +func (fs *FilterService) GetMyFiltersWithContext(ctx context.Context, opts *GetMyFiltersQueryOptions) ([]*Filter, *Response, error) { + apiEndpoint := "rest/api/3/filter/my" + url, err := addOptions(apiEndpoint, opts) + if err != nil { + return nil, nil, err + } + req, err := fs.client.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, nil, err + } + + filters := []*Filter{} + resp, err := fs.client.Do(req, &filters) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + return filters, resp, nil +} + +// GetMyFilters wraps GetMyFiltersWithContext using the background context. +func (fs *FilterService) GetMyFilters(opts *GetMyFiltersQueryOptions) ([]*Filter, *Response, error) { + return fs.GetMyFiltersWithContext(context.Background(), opts) +} + +// SearchWithContext will search for filters according to the search options +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-filter-search-get +func (fs *FilterService) SearchWithContext(ctx context.Context, opt *FilterSearchOptions) (*FiltersList, *Response, error) { + apiEndpoint := "rest/api/3/filter/search" + url, err := addOptions(apiEndpoint, opt) + if err != nil { + return nil, nil, err + } + req, err := fs.client.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, nil, err + } + + filters := new(FiltersList) + resp, err := fs.client.Do(req, filters) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return filters, resp, err +} + +// Search wraps SearchWithContext using the background context.
+func (fs *FilterService) Search(opt *FilterSearchOptions) (*FiltersList, *Response, error) { + return fs.SearchWithContext(context.Background(), opt) +} diff --git a/vendor/github.com/andygrunwald/go-jira/go.mod b/vendor/github.com/andygrunwald/go-jira/go.mod new file mode 100644 index 00000000000..c70857d7bcb --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/go.mod @@ -0,0 +1,13 @@ +module github.com/andygrunwald/go-jira + +go 1.12 + +require ( + github.com/fatih/structs v1.1.0 + github.com/golang-jwt/jwt v3.2.1+incompatible + github.com/google/go-cmp v0.5.6 + github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 + github.com/pkg/errors v0.9.1 + github.com/trivago/tgo v1.0.7 + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d +) diff --git a/vendor/github.com/andygrunwald/go-jira/go.sum b/vendor/github.com/andygrunwald/go-jira/go.sum new file mode 100644 index 00000000000..c09a21ddc68 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/go.sum @@ -0,0 +1,18 @@ +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM= +github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/andygrunwald/go-jira/group.go b/vendor/github.com/andygrunwald/go-jira/group.go new file mode 100644 index 00000000000..9f67105ff0e --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/group.go @@ -0,0 +1,177 @@ +package jira + +import ( + "context" + "fmt" + "net/url" +) + +// GroupService handles Groups for the Jira instance / API. 
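A usage sketch for the FilterService search API above, assuming the `Filter` accessor on `*jira.Client`; the base URL and filter-name fragment are placeholders:

```go
package main

import (
	"fmt"
	"log"

	jira "github.com/andygrunwald/go-jira"
)

func main() {
	client, err := jira.NewClient(nil, "https://jira.example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}

	// Filters whose name contains "release", ordered by name, with JQL expanded.
	list, _, err := client.Filter.Search(&jira.FilterSearchOptions{
		FilterName: "release",
		OrderBy:    "name",
		MaxResults: 25,
		Expand:     "jql",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range list.Values {
		fmt.Printf("%s (%s): %s\n", f.Name, f.ID, f.Jql)
	}
}
```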
+// +// Jira API docs: https://docs.atlassian.com/jira/REST/server/#api/2/group +type GroupService struct { + client *Client +} + +// groupMembersResult is only a small wrapper around the Group* methods +// to be able to parse the results +type groupMembersResult struct { + StartAt int `json:"startAt"` + MaxResults int `json:"maxResults"` + Total int `json:"total"` + Members []GroupMember `json:"values"` +} + +// Group represents a Jira group +type Group struct { + ID string `json:"id"` + Title string `json:"title"` + Type string `json:"type"` + Properties groupProperties `json:"properties"` + AdditionalProperties bool `json:"additionalProperties"` +} + +type groupProperties struct { + Name groupPropertiesName `json:"name"` +} + +type groupPropertiesName struct { + Type string `json:"type"` +} + +// GroupMember reflects a single member of a group +type GroupMember struct { + Self string `json:"self,omitempty"` + Name string `json:"name,omitempty"` + Key string `json:"key,omitempty"` + AccountID string `json:"accountId,omitempty"` + EmailAddress string `json:"emailAddress,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Active bool `json:"active,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + AccountType string `json:"accountType,omitempty"` +} + +// GroupSearchOptions specifies the optional parameters for the Get Group methods +type GroupSearchOptions struct { + StartAt int + MaxResults int + IncludeInactiveUsers bool +} + +// GetWithContext returns a paginated list of users who are members of the specified group and its subgroups. +// Users in the page are ordered by user names. +// User of this resource is required to have sysadmin or admin permissions. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/server/#api/2/group-getUsersFromGroup +// +// WARNING: This API only returns the first page of group members +func (s *GroupService) GetWithContext(ctx context.Context, name string) ([]GroupMember, *Response, error) { + apiEndpoint := fmt.Sprintf("/rest/api/2/group/member?groupname=%s", url.QueryEscape(name)) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + group := new(groupMembersResult) + resp, err := s.client.Do(req, group) + if err != nil { + return nil, resp, err + } + + return group.Members, resp, nil +} + +// Get wraps GetWithContext using the background context. +func (s *GroupService) Get(name string) ([]GroupMember, *Response, error) { + return s.GetWithContext(context.Background(), name) +} + +// GetWithOptionsWithContext returns a paginated list of members of the specified group and its subgroups. +// Users in the page are ordered by user names. +// User of this resource is required to have sysadmin or admin permissions. 
+// +// Jira API docs: https://docs.atlassian.com/jira/REST/server/#api/2/group-getUsersFromGroup +func (s *GroupService) GetWithOptionsWithContext(ctx context.Context, name string, options *GroupSearchOptions) ([]GroupMember, *Response, error) { + var apiEndpoint string + if options == nil { + apiEndpoint = fmt.Sprintf("/rest/api/2/group/member?groupname=%s", url.QueryEscape(name)) + } else { + apiEndpoint = fmt.Sprintf( + "/rest/api/2/group/member?groupname=%s&startAt=%d&maxResults=%d&includeInactiveUsers=%t", + url.QueryEscape(name), + options.StartAt, + options.MaxResults, + options.IncludeInactiveUsers, + ) + } + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + group := new(groupMembersResult) + resp, err := s.client.Do(req, group) + if err != nil { + return nil, resp, err + } + return group.Members, resp, nil +} + +// GetWithOptions wraps GetWithOptionsWithContext using the background context. +func (s *GroupService) GetWithOptions(name string, options *GroupSearchOptions) ([]GroupMember, *Response, error) { + return s.GetWithOptionsWithContext(context.Background(), name, options) +} + +// AddWithContext adds user to group +// +// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/group-addUserToGroup +func (s *GroupService) AddWithContext(ctx context.Context, groupname string, username string) (*Group, *Response, error) { + apiEndpoint := fmt.Sprintf("/rest/api/2/group/user?groupname=%s", groupname) + var user struct { + Name string `json:"name"` + } + user.Name = username + req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, &user) + if err != nil { + return nil, nil, err + } + + responseGroup := new(Group) + resp, err := s.client.Do(req, responseGroup) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return responseGroup, resp, nil +} + +// Add wraps AddWithContext using the background context. +func (s *GroupService) Add(groupname string, username string) (*Group, *Response, error) { + return s.AddWithContext(context.Background(), groupname, username) +} + +// RemoveWithContext removes user from group +// +// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/group-removeUserFromGroup +func (s *GroupService) RemoveWithContext(ctx context.Context, groupname string, username string) (*Response, error) { + apiEndpoint := fmt.Sprintf("/rest/api/2/group/user?groupname=%s&username=%s", groupname, username) + req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + jerr := NewJiraError(resp, err) + return resp, jerr + } + + return resp, nil +} + +// Remove wraps RemoveWithContext using the background context. 
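As its doc comment warns, Get only returns the first page of group members, so exhaustive listings go through GetWithOptions. A sketch that pages until a short page comes back; the `Group` accessor on `*jira.Client` is assumed:

```go
package main

import jira "github.com/andygrunwald/go-jira"

// allGroupMembers pages with GetWithOptions until a short page is returned.
func allGroupMembers(client *jira.Client, group string) ([]jira.GroupMember, error) {
	const pageSize = 50
	var members []jira.GroupMember
	for startAt := 0; ; startAt += pageSize {
		page, _, err := client.Group.GetWithOptions(group, &jira.GroupSearchOptions{
			StartAt:    startAt,
			MaxResults: pageSize,
		})
		if err != nil {
			return nil, err
		}
		members = append(members, page...)
		if len(page) < pageSize {
			return members, nil
		}
	}
}
```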
+func (s *GroupService) Remove(groupname string, username string) (*Response, error) { + return s.RemoveWithContext(context.Background(), groupname, username) +} diff --git a/vendor/github.com/andygrunwald/go-jira/issue.go b/vendor/github.com/andygrunwald/go-jira/issue.go new file mode 100644 index 00000000000..3e898d707ff --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/issue.go @@ -0,0 +1,1533 @@ +package jira + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/fatih/structs" + "github.com/google/go-querystring/query" + "github.com/trivago/tgo/tcontainer" +) + +const ( + // AssigneeAutomatic represents the value of the "Assignee: Automatic" of Jira + AssigneeAutomatic = "-1" +) + +// IssueService handles Issues for the Jira instance / API. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue +type IssueService struct { + client *Client +} + +// UpdateQueryOptions specifies the optional parameters to the Edit issue +type UpdateQueryOptions struct { + NotifyUsers bool `url:"notifyUsers,omitempty"` + OverrideScreenSecurity bool `url:"overrideScreenSecurity,omitempty"` + OverrideEditableFlag bool `url:"overrideEditableFlag,omitempty"` +} + +// Issue represents a Jira issue. +type Issue struct { + Expand string `json:"expand,omitempty" structs:"expand,omitempty"` + ID string `json:"id,omitempty" structs:"id,omitempty"` + Self string `json:"self,omitempty" structs:"self,omitempty"` + Key string `json:"key,omitempty" structs:"key,omitempty"` + Fields *IssueFields `json:"fields,omitempty" structs:"fields,omitempty"` + RenderedFields *IssueRenderedFields `json:"renderedFields,omitempty" structs:"renderedFields,omitempty"` + Changelog *Changelog `json:"changelog,omitempty" structs:"changelog,omitempty"` + Transitions []Transition `json:"transitions,omitempty" structs:"transitions,omitempty"` + Names map[string]string `json:"names,omitempty" structs:"names,omitempty"` +} + +// ChangelogItems reflects one single changelog item of a history item +type ChangelogItems struct { + Field string `json:"field" structs:"field"` + FieldType string `json:"fieldtype" structs:"fieldtype"` + From interface{} `json:"from" structs:"from"` + FromString string `json:"fromString" structs:"fromString"` + To interface{} `json:"to" structs:"to"` + ToString string `json:"toString" structs:"toString"` +} + +// ChangelogHistory reflects one single changelog history entry +type ChangelogHistory struct { + Id string `json:"id" structs:"id"` + Author User `json:"author" structs:"author"` + Created string `json:"created" structs:"created"` + Items []ChangelogItems `json:"items" structs:"items"` +} + +// Changelog reflects the change log of an issue +type Changelog struct { + Histories []ChangelogHistory `json:"histories,omitempty"` +} + +// Attachment represents a Jira attachment +type Attachment struct { + Self string `json:"self,omitempty" structs:"self,omitempty"` + ID string `json:"id,omitempty" structs:"id,omitempty"` + Filename string `json:"filename,omitempty" structs:"filename,omitempty"` + Author *User `json:"author,omitempty" structs:"author,omitempty"` + Created string `json:"created,omitempty" structs:"created,omitempty"` + Size int `json:"size,omitempty" structs:"size,omitempty"` + MimeType string `json:"mimeType,omitempty" structs:"mimeType,omitempty"` + Content string `json:"content,omitempty" structs:"content,omitempty"` + Thumbnail string 
`json:"thumbnail,omitempty" structs:"thumbnail,omitempty"` +} + +// Epic represents the epic to which an issue is associated +// Note that this struct does not process the returned "color" value +type Epic struct { + ID      int    `json:"id" structs:"id"` + Key     string `json:"key" structs:"key"` + Self    string `json:"self" structs:"self"` + Name    string `json:"name" structs:"name"` + Summary string `json:"summary" structs:"summary"` + Done    bool   `json:"done" structs:"done"` +} + +// IssueFields represents single fields of a Jira issue. +// Every Jira issue has several fields attached. +type IssueFields struct { + // TODO Missing fields + // * "workratio": -1, + // * "lastViewed": null, + // * "environment": null, + Expand                        string            `json:"expand,omitempty" structs:"expand,omitempty"` + Type                          IssueType         `json:"issuetype,omitempty" structs:"issuetype,omitempty"` + Project                       Project           `json:"project,omitempty" structs:"project,omitempty"` + Resolution                    *Resolution       `json:"resolution,omitempty" structs:"resolution,omitempty"` + Priority                      *Priority         `json:"priority,omitempty" structs:"priority,omitempty"` + Resolutiondate                Time              `json:"resolutiondate,omitempty" structs:"resolutiondate,omitempty"` + Created                       Time              `json:"created,omitempty" structs:"created,omitempty"` + Duedate                       Date              `json:"duedate,omitempty" structs:"duedate,omitempty"` + Watches                       *Watches          `json:"watches,omitempty" structs:"watches,omitempty"` + Assignee                      *User             `json:"assignee,omitempty" structs:"assignee,omitempty"` + Updated                       Time              `json:"updated,omitempty" structs:"updated,omitempty"` + Description                   string            `json:"description,omitempty" structs:"description,omitempty"` + Summary                       string            `json:"summary,omitempty" structs:"summary,omitempty"` + Creator                       *User             `json:"Creator,omitempty" structs:"Creator,omitempty"` + Reporter                      *User             `json:"reporter,omitempty" structs:"reporter,omitempty"` + Components                    []*Component      `json:"components,omitempty" structs:"components,omitempty"` + Status                        *Status           `json:"status,omitempty" structs:"status,omitempty"` + Progress                      *Progress         `json:"progress,omitempty" structs:"progress,omitempty"` + AggregateProgress             *Progress         `json:"aggregateprogress,omitempty" structs:"aggregateprogress,omitempty"` + TimeTracking                  *TimeTracking     `json:"timetracking,omitempty" structs:"timetracking,omitempty"` + TimeSpent                     int               `json:"timespent,omitempty" structs:"timespent,omitempty"` + TimeEstimate                  int               `json:"timeestimate,omitempty" structs:"timeestimate,omitempty"` + TimeOriginalEstimate          int               `json:"timeoriginalestimate,omitempty" structs:"timeoriginalestimate,omitempty"` + Worklog                       *Worklog          `json:"worklog,omitempty" structs:"worklog,omitempty"` + IssueLinks                    []*IssueLink      `json:"issuelinks,omitempty" structs:"issuelinks,omitempty"` + Comments                      *Comments         `json:"comment,omitempty" structs:"comment,omitempty"` + FixVersions                   []*FixVersion     `json:"fixVersions,omitempty" structs:"fixVersions,omitempty"` + AffectsVersions               []*AffectsVersion `json:"versions,omitempty" structs:"versions,omitempty"` + Labels                        []string          `json:"labels,omitempty" structs:"labels,omitempty"` + Subtasks                      []*Subtasks       `json:"subtasks,omitempty" structs:"subtasks,omitempty"` + Attachments                   []*Attachment     `json:"attachment,omitempty" structs:"attachment,omitempty"` + Epic                          *Epic             `json:"epic,omitempty" structs:"epic,omitempty"` + Sprint                        *Sprint           `json:"sprint,omitempty" structs:"sprint,omitempty"` + Parent                        *Parent           `json:"parent,omitempty" structs:"parent,omitempty"` + AggregateTimeOriginalEstimate int               `json:"aggregatetimeoriginalestimate,omitempty" structs:"aggregatetimeoriginalestimate,omitempty"` +
AggregateTimeSpent            int               `json:"aggregatetimespent,omitempty" structs:"aggregatetimespent,omitempty"` + AggregateTimeEstimate         int               `json:"aggregatetimeestimate,omitempty" structs:"aggregatetimeestimate,omitempty"` + Unknowns                      tcontainer.MarshalMap +} + +// MarshalJSON is a custom JSON marshal function for the IssueFields structs. +// It handles Jira custom fields and maps those from / to "Unknowns" key. +func (i *IssueFields) MarshalJSON() ([]byte, error) { + m := structs.Map(i) + unknowns, okay := m["Unknowns"] + if okay { + // if Unknowns are present, shift all key/value pairs from Unknowns up one level + for key, value := range unknowns.(tcontainer.MarshalMap) { + m[key] = value + } + delete(m, "Unknowns") + } + return json.Marshal(m) +} + +// UnmarshalJSON is a custom JSON unmarshal function for the IssueFields structs. +// It handles Jira custom fields and maps those from / to "Unknowns" key. +func (i *IssueFields) UnmarshalJSON(data []byte) error { + + // Do the normal unmarshalling first + // Details for this way: http://choly.ca/post/go-json-marshalling/ + type Alias IssueFields + aux := &struct { + *Alias + }{ + Alias: (*Alias)(i), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + totalMap := tcontainer.NewMarshalMap() + err := json.Unmarshal(data, &totalMap) + if err != nil { + return err + } + + t := reflect.TypeOf(*i) + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + tagDetail := field.Tag.Get("json") + if tagDetail == "" { + // ignore if there are no tags + continue + } + options := strings.Split(tagDetail, ",") + + if len(options) == 0 { + return fmt.Errorf("no tags options found for %s", field.Name) + } + // the first one is the json tag + key := options[0] + if _, okay := totalMap.Value(key); okay { + delete(totalMap, key) + } + + } + i = (*IssueFields)(aux.Alias) + // all keys matching struct tags have been removed; whatever is left is unknown to the struct + i.Unknowns = totalMap + return nil + +} + +// IssueRenderedFields represents rendered fields of a Jira issue. +// Not all IssueFields are rendered. +type IssueRenderedFields struct { + // TODO Missing fields + // * "aggregatetimespent": null, + // * "workratio": -1, + // * "lastViewed": null, + // * "aggregatetimeoriginalestimate": null, + // * "aggregatetimeestimate": null, + // * "environment": null, + Resolutiondate string    `json:"resolutiondate,omitempty" structs:"resolutiondate,omitempty"` + Created        string    `json:"created,omitempty" structs:"created,omitempty"` + Duedate        string    `json:"duedate,omitempty" structs:"duedate,omitempty"` + Updated        string    `json:"updated,omitempty" structs:"updated,omitempty"` + Comments       *Comments `json:"comment,omitempty" structs:"comment,omitempty"` + Description    string    `json:"description,omitempty" structs:"description,omitempty"` +} + +// IssueType represents a type of a Jira issue. +// Typical types are "Request", "Bug", "Story", ... +type IssueType struct { + Self        string `json:"self,omitempty" structs:"self,omitempty"` + ID          string `json:"id,omitempty" structs:"id,omitempty"` + Description string `json:"description,omitempty" structs:"description,omitempty"` + IconURL     string `json:"iconUrl,omitempty" structs:"iconUrl,omitempty"` + Name        string `json:"name,omitempty" structs:"name,omitempty"` + Subtask     bool   `json:"subtask,omitempty" structs:"subtask,omitempty"` + AvatarID    int    `json:"avatarId,omitempty" structs:"avatarId,omitempty"` +} + +// Watches represents a type of how many and which users are "observing" a Jira issue to track the status / updates.
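The MarshalJSON/UnmarshalJSON pair above means any custom field round-trips through `IssueFields.Unknowns`. A sketch of reading one back, assuming the `Issue` accessor on `*jira.Client`; the issue key and `customfield_10001` ID are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	jira "github.com/andygrunwald/go-jira"
)

func main() {
	client, err := jira.NewClient(nil, "https://jira.example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}

	issue, _, err := client.Issue.Get("PROJ-1", nil) // placeholder key
	if err != nil {
		log.Fatal(err)
	}

	// Fields without a matching struct tag land in Unknowns, so custom
	// fields are read back through the tcontainer.MarshalMap API.
	if v, ok := issue.Fields.Unknowns.Value("customfield_10001"); ok { // hypothetical field ID
		fmt.Printf("custom field value: %v\n", v)
	}
}
```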
+type Watches struct { + Self string `json:"self,omitempty" structs:"self,omitempty"` + WatchCount int `json:"watchCount,omitempty" structs:"watchCount,omitempty"` + IsWatching bool `json:"isWatching,omitempty" structs:"isWatching,omitempty"` + Watchers []*Watcher `json:"watchers,omitempty" structs:"watchers,omitempty"` +} + +// Watcher represents a simplified user that "observes" the issue +type Watcher struct { + Self string `json:"self,omitempty" structs:"self,omitempty"` + Name string `json:"name,omitempty" structs:"name,omitempty"` + AccountID string `json:"accountId,omitempty" structs:"accountId,omitempty"` + DisplayName string `json:"displayName,omitempty" structs:"displayName,omitempty"` + Active bool `json:"active,omitempty" structs:"active,omitempty"` +} + +// AvatarUrls represents different dimensions of avatars / images +type AvatarUrls struct { + Four8X48 string `json:"48x48,omitempty" structs:"48x48,omitempty"` + Two4X24 string `json:"24x24,omitempty" structs:"24x24,omitempty"` + One6X16 string `json:"16x16,omitempty" structs:"16x16,omitempty"` + Three2X32 string `json:"32x32,omitempty" structs:"32x32,omitempty"` +} + +// Component represents a "component" of a Jira issue. +// Components can be user defined in every Jira instance. +type Component struct { + Self string `json:"self,omitempty" structs:"self,omitempty"` + ID string `json:"id,omitempty" structs:"id,omitempty"` + Name string `json:"name,omitempty" structs:"name,omitempty"` + Description string `json:"description,omitempty" structs:"description,omitempty"` +} + +// Progress represents the progress of a Jira issue. +type Progress struct { + Progress int `json:"progress" structs:"progress"` + Total int `json:"total" structs:"total"` + Percent int `json:"percent" structs:"percent"` +} + +// Parent represents the parent of a Jira issue, to be used with subtask issue types. 
+type Parent struct { + ID string `json:"id,omitempty" structs:"id"` + Key string `json:"key,omitempty" structs:"key"` +} + +// Time represents the Time definition of Jira as a time.Time of go +type Time time.Time + +func (t Time) Equal(u Time) bool { + return time.Time(t).Equal(time.Time(u)) +} + +// Date represents the Date definition of Jira as a time.Time of go +type Date time.Time + +// Wrapper struct for search result +type transitionResult struct { + Transitions []Transition `json:"transitions" structs:"transitions"` +} + +// Transition represents an issue transition in Jira +type Transition struct { + ID string `json:"id" structs:"id"` + Name string `json:"name" structs:"name"` + To Status `json:"to" structs:"status"` + Fields map[string]TransitionField `json:"fields" structs:"fields"` +} + +// TransitionField represents the value of one Transition +type TransitionField struct { + Required bool `json:"required" structs:"required"` +} + +// CreateTransitionPayload is used for creating new issue transitions +type CreateTransitionPayload struct { + Transition TransitionPayload `json:"transition" structs:"transition"` + Fields TransitionPayloadFields `json:"fields" structs:"fields"` +} + +// TransitionPayload represents the request payload of Transition calls like DoTransition +type TransitionPayload struct { + ID string `json:"id" structs:"id"` +} + +// TransitionPayloadFields represents the fields that can be set when executing a transition +type TransitionPayloadFields struct { + Resolution *Resolution `json:"resolution,omitempty" structs:"resolution,omitempty"` +} + +// Option represents an option value in a SelectList or MultiSelect +// custom issue field +type Option struct { + Value string `json:"value" structs:"value"` +} + +// UnmarshalJSON will transform the Jira time into a time.Time +// during the transformation of the Jira JSON response +func (t *Time) UnmarshalJSON(b []byte) error { + // Ignore null, like in the main JSON package. + if string(b) == "null" { + return nil + } + ti, err := time.Parse("\"2006-01-02T15:04:05.999-0700\"", string(b)) + if err != nil { + return err + } + *t = Time(ti) + return nil +} + +// MarshalJSON will transform the time.Time into a Jira time +// during the creation of a Jira request +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(time.Time(t).Format("\"2006-01-02T15:04:05.000-0700\"")), nil +} + +// UnmarshalJSON will transform the Jira date into a time.Time +// during the transformation of the Jira JSON response +func (t *Date) UnmarshalJSON(b []byte) error { + // Ignore null, like in the main JSON package. + if string(b) == "null" { + return nil + } + ti, err := time.Parse("\"2006-01-02\"", string(b)) + if err != nil { + return err + } + *t = Date(ti) + return nil +} + +// MarshalJSON will transform the Date object into a short +// date string as Jira expects during the creation of a +// Jira request +func (t Date) MarshalJSON() ([]byte, error) { + time := time.Time(t) + return []byte(time.Format("\"2006-01-02\"")), nil +} + +// Worklog represents the work log of a Jira issue. 
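The custom Time and Date marshalers above exist because Jira's timestamp layout is not RFC 3339. A self-contained round-trip showing the layout they accept and emit:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	jira "github.com/andygrunwald/go-jira"
)

func main() {
	// Jira timestamps use the layout "2006-01-02T15:04:05.999-0700".
	var t jira.Time
	if err := json.Unmarshal([]byte(`"2021-07-01T10:30:00.000+0000"`), &t); err != nil {
		log.Fatal(err)
	}
	out, err := json.Marshal(t)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // "2021-07-01T10:30:00.000+0000"
}
```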
+// One Worklog contains zero or n WorklogRecords +// Jira Wiki: https://confluence.atlassian.com/jira/logging-work-on-an-issue-185729605.html +type Worklog struct { + StartAt int `json:"startAt" structs:"startAt"` + MaxResults int `json:"maxResults" structs:"maxResults"` + Total int `json:"total" structs:"total"` + Worklogs []WorklogRecord `json:"worklogs" structs:"worklogs"` +} + +// WorklogRecord represents one entry of a Worklog +type WorklogRecord struct { + Self string `json:"self,omitempty" structs:"self,omitempty"` + Author *User `json:"author,omitempty" structs:"author,omitempty"` + UpdateAuthor *User `json:"updateAuthor,omitempty" structs:"updateAuthor,omitempty"` + Comment string `json:"comment,omitempty" structs:"comment,omitempty"` + Created *Time `json:"created,omitempty" structs:"created,omitempty"` + Updated *Time `json:"updated,omitempty" structs:"updated,omitempty"` + Started *Time `json:"started,omitempty" structs:"started,omitempty"` + TimeSpent string `json:"timeSpent,omitempty" structs:"timeSpent,omitempty"` + TimeSpentSeconds int `json:"timeSpentSeconds,omitempty" structs:"timeSpentSeconds,omitempty"` + ID string `json:"id,omitempty" structs:"id,omitempty"` + IssueID string `json:"issueId,omitempty" structs:"issueId,omitempty"` + Properties []EntityProperty `json:"properties,omitempty"` +} + +type EntityProperty struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// TimeTracking represents the timetracking fields of a Jira issue. +type TimeTracking struct { + OriginalEstimate string `json:"originalEstimate,omitempty" structs:"originalEstimate,omitempty"` + RemainingEstimate string `json:"remainingEstimate,omitempty" structs:"remainingEstimate,omitempty"` + TimeSpent string `json:"timeSpent,omitempty" structs:"timeSpent,omitempty"` + OriginalEstimateSeconds int `json:"originalEstimateSeconds,omitempty" structs:"originalEstimateSeconds,omitempty"` + RemainingEstimateSeconds int `json:"remainingEstimateSeconds,omitempty" structs:"remainingEstimateSeconds,omitempty"` + TimeSpentSeconds int `json:"timeSpentSeconds,omitempty" structs:"timeSpentSeconds,omitempty"` +} + +// Subtasks represents all issues of a parent issue. +type Subtasks struct { + ID string `json:"id" structs:"id"` + Key string `json:"key" structs:"key"` + Self string `json:"self" structs:"self"` + Fields IssueFields `json:"fields" structs:"fields"` +} + +// IssueLink represents a link between two issues in Jira. +type IssueLink struct { + ID string `json:"id,omitempty" structs:"id,omitempty"` + Self string `json:"self,omitempty" structs:"self,omitempty"` + Type IssueLinkType `json:"type" structs:"type"` + OutwardIssue *Issue `json:"outwardIssue" structs:"outwardIssue"` + InwardIssue *Issue `json:"inwardIssue" structs:"inwardIssue"` + Comment *Comment `json:"comment,omitempty" structs:"comment,omitempty"` +} + +// IssueLinkType represents a type of a link between to issues in Jira. +// Typical issue link types are "Related to", "Duplicate", "Is blocked by", etc. +type IssueLinkType struct { + ID string `json:"id,omitempty" structs:"id,omitempty"` + Self string `json:"self,omitempty" structs:"self,omitempty"` + Name string `json:"name" structs:"name"` + Inward string `json:"inward" structs:"inward"` + Outward string `json:"outward" structs:"outward"` +} + +// Comments represents a list of Comment. +type Comments struct { + Comments []*Comment `json:"comments,omitempty" structs:"comments,omitempty"` +} + +// Comment represents a comment by a person to an issue in Jira. 
+type Comment struct { + ID           string            `json:"id,omitempty" structs:"id,omitempty"` + Self         string            `json:"self,omitempty" structs:"self,omitempty"` + Name         string            `json:"name,omitempty" structs:"name,omitempty"` + Author       User              `json:"author,omitempty" structs:"author,omitempty"` + Body         string            `json:"body,omitempty" structs:"body,omitempty"` + UpdateAuthor User              `json:"updateAuthor,omitempty" structs:"updateAuthor,omitempty"` + Updated      string            `json:"updated,omitempty" structs:"updated,omitempty"` + Created      string            `json:"created,omitempty" structs:"created,omitempty"` + Visibility   CommentVisibility `json:"visibility,omitempty" structs:"visibility,omitempty"` +} + +// FixVersion represents a software release in which an issue is fixed. +type FixVersion struct { + Self            string `json:"self,omitempty" structs:"self,omitempty"` + ID              string `json:"id,omitempty" structs:"id,omitempty"` + Name            string `json:"name,omitempty" structs:"name,omitempty"` + Description     string `json:"description,omitempty" structs:"description,omitempty"` + Archived        *bool  `json:"archived,omitempty" structs:"archived,omitempty"` + Released        *bool  `json:"released,omitempty" structs:"released,omitempty"` + ReleaseDate     string `json:"releaseDate,omitempty" structs:"releaseDate,omitempty"` + UserReleaseDate string `json:"userReleaseDate,omitempty" structs:"userReleaseDate,omitempty"` + ProjectID       int    `json:"projectId,omitempty" structs:"projectId,omitempty"` // Unlike other IDs, this is returned as a number + StartDate       string `json:"startDate,omitempty" structs:"startDate,omitempty"` +} + +// AffectsVersion represents a software release which is affected by an issue. +type AffectsVersion Version + +// CommentVisibility represents the visibility of a comment. +// E.g. Type could be "role" and Value "Administrators" +type CommentVisibility struct { + Type  string `json:"type,omitempty" structs:"type,omitempty"` + Value string `json:"value,omitempty" structs:"value,omitempty"` +} + +// SearchOptions specifies the optional parameters to various List methods that +// support pagination. +// Pagination is used for the Jira REST APIs to conserve server resources and limit +// response size for resources that return potentially large collection of items. +// A request to a paged API will result in a values array wrapped in a JSON object with some paging metadata +// Default Pagination options +type SearchOptions struct { + // StartAt: The starting index of the returned projects. Base index: 0. + StartAt int `url:"startAt,omitempty"` + // MaxResults: The maximum number of projects to return per page. Default: 50. + MaxResults int `url:"maxResults,omitempty"` + // Expand: Expand specific sections in the returned issues + Expand string `url:"expand,omitempty"` + Fields []string + // ValidateQuery: The validateQuery param offers control over whether to validate and how strictly to treat the validation. Default: strict. + ValidateQuery string `url:"validateQuery,omitempty"` +} + +// searchResult is only a small wrapper around the Search (with JQL) method +// to be able to parse the results +type searchResult struct { + Issues     []Issue `json:"issues" structs:"issues"` + StartAt    int     `json:"startAt" structs:"startAt"` + MaxResults int     `json:"maxResults" structs:"maxResults"` + Total      int     `json:"total" structs:"total"` +} + +// GetQueryOptions specifies the optional parameters for the Get Issue methods +type GetQueryOptions struct { + // Fields is the list of fields to return for the issue. By default, all fields are returned.
+ Fields string `url:"fields,omitempty"` + Expand string `url:"expand,omitempty"` + // Properties is the list of properties to return for the issue. By default no properties are returned. + Properties string `url:"properties,omitempty"` + // FieldsByKeys if true then fields in issues will be referenced by keys instead of ids + FieldsByKeys bool `url:"fieldsByKeys,omitempty"` + UpdateHistory bool `url:"updateHistory,omitempty"` + ProjectKeys string `url:"projectKeys,omitempty"` +} + +// GetWorklogsQueryOptions specifies the optional parameters for the Get Worklogs method +type GetWorklogsQueryOptions struct { + StartAt int64 `url:"startAt,omitempty"` + MaxResults int32 `url:"maxResults,omitempty"` + StartedAfter int64 `url:"startedAfter,omitempty"` + Expand string `url:"expand,omitempty"` +} + +type AddWorklogQueryOptions struct { + NotifyUsers bool `url:"notifyUsers,omitempty"` + AdjustEstimate string `url:"adjustEstimate,omitempty"` + NewEstimate string `url:"newEstimate,omitempty"` + ReduceBy string `url:"reduceBy,omitempty"` + Expand string `url:"expand,omitempty"` + OverrideEditableFlag bool `url:"overrideEditableFlag,omitempty"` +} + +// CustomFields represents custom fields of Jira +// This can heavily differ between Jira instances +type CustomFields map[string]string + +// RemoteLink represents remote links which linked to issues +type RemoteLink struct { + ID int `json:"id,omitempty" structs:"id,omitempty"` + Self string `json:"self,omitempty" structs:"self,omitempty"` + GlobalID string `json:"globalId,omitempty" structs:"globalId,omitempty"` + Application *RemoteLinkApplication `json:"application,omitempty" structs:"application,omitempty"` + Relationship string `json:"relationship,omitempty" structs:"relationship,omitempty"` + Object *RemoteLinkObject `json:"object,omitempty" structs:"object,omitempty"` +} + +// RemoteLinkApplication represents remote links application +type RemoteLinkApplication struct { + Type string `json:"type,omitempty" structs:"type,omitempty"` + Name string `json:"name,omitempty" structs:"name,omitempty"` +} + +// RemoteLinkObject represents remote link object itself +type RemoteLinkObject struct { + URL string `json:"url,omitempty" structs:"url,omitempty"` + Title string `json:"title,omitempty" structs:"title,omitempty"` + Summary string `json:"summary,omitempty" structs:"summary,omitempty"` + Icon *RemoteLinkIcon `json:"icon,omitempty" structs:"icon,omitempty"` + Status *RemoteLinkStatus `json:"status,omitempty" structs:"status,omitempty"` +} + +// RemoteLinkIcon represents icon displayed next to link +type RemoteLinkIcon struct { + Url16x16 string `json:"url16x16,omitempty" structs:"url16x16,omitempty"` + Title string `json:"title,omitempty" structs:"title,omitempty"` + Link string `json:"link,omitempty" structs:"link,omitempty"` +} + +// RemoteLinkStatus if the link is a resolvable object (issue, epic) - the structure represent its status +type RemoteLinkStatus struct { + Resolved bool `json:"resolved,omitempty" structs:"resolved,omitempty"` + Icon *RemoteLinkIcon `json:"icon,omitempty" structs:"icon,omitempty"` +} + +// GetWithContext returns a full representation of the issue for the given issue key. +// Jira will attempt to identify the issue by the issueIdOrKey path parameter. +// This can be an issue id, or an issue key. +// If the issue cannot be found via an exact match, Jira will also look for the issue in a case-insensitive way, or by looking to see if the issue was moved. 
+// +// The given options will be appended to the query string +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getIssue +func (s *IssueService) GetWithContext(ctx context.Context, issueID string, options *GetQueryOptions) (*Issue, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + if options != nil { + q, err := query.Values(options) + if err != nil { + return nil, nil, err + } + req.URL.RawQuery = q.Encode() + } + + issue := new(Issue) + resp, err := s.client.Do(req, issue) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return issue, resp, nil +} + +// Get wraps GetWithContext using the background context. +func (s *IssueService) Get(issueID string, options *GetQueryOptions) (*Issue, *Response, error) { + return s.GetWithContext(context.Background(), issueID, options) +} + +// DownloadAttachmentWithContext returns a Response of an attachment for a given attachmentID. +// The attachment is in the Response.Body of the response. +// This is an io.ReadCloser. +// The caller should close the resp.Body. +func (s *IssueService) DownloadAttachmentWithContext(ctx context.Context, attachmentID string) (*Response, error) { + apiEndpoint := fmt.Sprintf("secure/attachment/%s/", attachmentID) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + jerr := NewJiraError(resp, err) + return resp, jerr + } + + return resp, nil +} + +// DownloadAttachment wraps DownloadAttachmentWithContext using the background context. +func (s *IssueService) DownloadAttachment(attachmentID string) (*Response, error) { + return s.DownloadAttachmentWithContext(context.Background(), attachmentID) +} + +// PostAttachmentWithContext uploads r (io.Reader) as an attachment to a given issueID +func (s *IssueService) PostAttachmentWithContext(ctx context.Context, issueID string, r io.Reader, attachmentName string) (*[]Attachment, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/attachments", issueID) + + b := new(bytes.Buffer) + writer := multipart.NewWriter(b) + + fw, err := writer.CreateFormFile("file", attachmentName) + if err != nil { + return nil, nil, err + } + + if r != nil { + // Copy the file + if _, err = io.Copy(fw, r); err != nil { + return nil, nil, err + } + } + writer.Close() + + req, err := s.client.NewMultiPartRequestWithContext(ctx, "POST", apiEndpoint, b) + if err != nil { + return nil, nil, err + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // PostAttachment response returns a JSON array (as multiple attachments can be posted) + attachment := new([]Attachment) + resp, err := s.client.Do(req, attachment) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return attachment, resp, nil +} + +// PostAttachment wraps PostAttachmentWithContext using the background context. 
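A sketch of the Get options in practice: restricting the returned fields and expanding the changelog. The `Issue` accessor on `*jira.Client` is assumed; the issue key is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	jira "github.com/andygrunwald/go-jira"
)

func printChangelog(client *jira.Client) {
	issue, _, err := client.Issue.Get("PROJ-1", &jira.GetQueryOptions{ // placeholder key
		Fields: "summary,status",
		Expand: "changelog",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(issue.Fields.Summary)
	for _, h := range issue.Changelog.Histories {
		fmt.Println(h.Created, h.Author.DisplayName, len(h.Items), "item(s)")
	}
}
```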
+func (s *IssueService) PostAttachment(issueID string, r io.Reader, attachmentName string) (*[]Attachment, *Response, error) { + return s.PostAttachmentWithContext(context.Background(), issueID, r, attachmentName) +} + +// DeleteAttachmentWithContext deletes an attachment of a given attachmentID +func (s *IssueService) DeleteAttachmentWithContext(ctx context.Context, attachmentID string) (*Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/attachment/%s", attachmentID) + + req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + jerr := NewJiraError(resp, err) + return resp, jerr + } + + return resp, nil +} + +// DeleteAttachment wraps DeleteAttachmentWithContext using the background context. +func (s *IssueService) DeleteAttachment(attachmentID string) (*Response, error) { + return s.DeleteAttachmentWithContext(context.Background(), attachmentID) +} + +// DeleteLinkWithContext deletes a link of a given linkID +func (s *IssueService) DeleteLinkWithContext(ctx context.Context, linkID string) (*Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issueLink/%s", linkID) + + req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + jerr := NewJiraError(resp, err) + return resp, jerr + } + + return resp, nil +} + +// DeleteLink wraps DeleteLinkWithContext using the background context. +func (s *IssueService) DeleteLink(linkID string) (*Response, error) { + return s.DeleteLinkWithContext(context.Background(), linkID) +} + +// GetWorklogsWithContext gets all the worklogs for an issue. +// This method is especially important if you need to read all the worklogs, not just the first page. +// +// https://docs.atlassian.com/jira/REST/cloud/#api/2/issue/{issueIdOrKey}/worklog-getIssueWorklog +func (s *IssueService) GetWorklogsWithContext(ctx context.Context, issueID string, options ...func(*http.Request) error) (*Worklog, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/worklog", issueID) + + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + for _, option := range options { + err = option(req) + if err != nil { + return nil, nil, err + } + } + + v := new(Worklog) + resp, err := s.client.Do(req, v) + return v, resp, err +} + +// GetWorklogs wraps GetWorklogsWithContext using the background context. +func (s *IssueService) GetWorklogs(issueID string, options ...func(*http.Request) error) (*Worklog, *Response, error) { + return s.GetWorklogsWithContext(context.Background(), issueID, options...) +} + +// Applies query options to http request. +// This helper is meant to be used with all "QueryOptions" structs. +func WithQueryOptions(options interface{}) func(*http.Request) error { + q, err := query.Values(options) + if err != nil { + return func(*http.Request) error { + return err + } + } + + return func(r *http.Request) error { + r.URL.RawQuery = q.Encode() + return nil + } +} + +// CreateWithContext creates an issue or a sub-task from a JSON representation. +// Creating a sub-task is similar to creating a regular issue, with two important differences: +// The issueType field must correspond to a sub-task issue type and you must provide a parent field in the issue create request containing the id or key of the parent issue. 
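The WithQueryOptions helper above is how the `*QueryOptions` structs plug into variadic methods such as GetWorklogs. A sketch, with the client assumed as before:

```go
package main

import (
	"fmt"

	jira "github.com/andygrunwald/go-jira"
)

// recentWorklogs lists worklogs started after a Unix-millisecond timestamp.
func recentWorklogs(client *jira.Client, issueKey string, sinceMillis int64) error {
	wl, _, err := client.Issue.GetWorklogs(issueKey, jira.WithQueryOptions(&jira.GetWorklogsQueryOptions{
		StartedAfter: sinceMillis,
		MaxResults:   100,
	}))
	if err != nil {
		return err
	}
	for _, r := range wl.Worklogs {
		fmt.Println(r.ID, r.TimeSpent)
	}
	return nil
}
```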
+// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-createIssues +func (s *IssueService) CreateWithContext(ctx context.Context, issue *Issue) (*Issue, *Response, error) { + apiEndpoint := "rest/api/2/issue" + req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, issue) + if err != nil { + return nil, nil, err + } + resp, err := s.client.Do(req, nil) + if err != nil { + // in case of error return the resp for further inspection + return nil, resp, err + } + + responseIssue := new(Issue) + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, resp, fmt.Errorf("could not read the returned data") + } + err = json.Unmarshal(data, responseIssue) + if err != nil { + return nil, resp, fmt.Errorf("could not unmarshal the data into struct") + } + return responseIssue, resp, nil +} + +// Create wraps CreateWithContext using the background context. +func (s *IssueService) Create(issue *Issue) (*Issue, *Response, error) { + return s.CreateWithContext(context.Background(), issue) +} + +// UpdateWithOptionsWithContext updates an issue from a JSON representation, +// while also specifying query params. The issue is found by key. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/issue-editIssue +func (s *IssueService) UpdateWithOptionsWithContext(ctx context.Context, issue *Issue, opts *UpdateQueryOptions) (*Issue, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%v", issue.Key) + url, err := addOptions(apiEndpoint, opts) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequestWithContext(ctx, "PUT", url, issue) + if err != nil { + return nil, nil, err + } + resp, err := s.client.Do(req, nil) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + // This is just to follow the rest of the API's convention of returning an issue. + // Returning the same pointer here is pointless, so we return a copy instead. + ret := *issue + return &ret, resp, nil +} + +// UpdateWithOptions wraps UpdateWithOptionsWithContext using the background context. +func (s *IssueService) UpdateWithOptions(issue *Issue, opts *UpdateQueryOptions) (*Issue, *Response, error) { + return s.UpdateWithOptionsWithContext(context.Background(), issue, opts) +} + +// UpdateWithContext updates an issue from a JSON representation. The issue is found by key. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/issue-editIssue +func (s *IssueService) UpdateWithContext(ctx context.Context, issue *Issue) (*Issue, *Response, error) { + return s.UpdateWithOptionsWithContext(ctx, issue, nil) +} + +// Update wraps UpdateWithContext using the background context. +func (s *IssueService) Update(issue *Issue) (*Issue, *Response, error) { + return s.UpdateWithContext(context.Background(), issue) +} + +// UpdateIssueWithContext updates an issue from a JSON representation. The issue is found by key. +// +// https://docs.atlassian.com/jira/REST/7.4.0/#api/2/issue-editIssue +func (s *IssueService) UpdateIssueWithContext(ctx context.Context, jiraID string, data map[string]interface{}) (*Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%v", jiraID) + req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndpoint, data) + if err != nil { + return nil, err + } + resp, err := s.client.Do(req, nil) + if err != nil { + return resp, err + } + + // This is just to follow the rest of the API's convention of returning an issue.
+// UpdateIssue wraps UpdateIssueWithContext using the background context.
+func (s *IssueService) UpdateIssue(jiraID string, data map[string]interface{}) (*Response, error) {
+	return s.UpdateIssueWithContext(context.Background(), jiraID, data)
+}
+
+// AddCommentWithContext adds a new comment to issueID.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-addComment
+func (s *IssueService) AddCommentWithContext(ctx context.Context, issueID string, comment *Comment) (*Comment, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/comment", issueID)
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, comment)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	responseComment := new(Comment)
+	resp, err := s.client.Do(req, responseComment)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return responseComment, resp, nil
+}
+
+// AddComment wraps AddCommentWithContext using the background context.
+func (s *IssueService) AddComment(issueID string, comment *Comment) (*Comment, *Response, error) {
+	return s.AddCommentWithContext(context.Background(), issueID, comment)
+}
+
+// UpdateCommentWithContext updates the body of a comment, identified by comment.ID, on the issueID.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/issue/{issueIdOrKey}/comment-updateComment
+func (s *IssueService) UpdateCommentWithContext(ctx context.Context, issueID string, comment *Comment) (*Comment, *Response, error) {
+	reqBody := struct {
+		Body string `json:"body"`
+	}{
+		Body: comment.Body,
+	}
+	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/comment/%s", issueID, comment.ID)
+	req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndpoint, reqBody)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	responseComment := new(Comment)
+	resp, err := s.client.Do(req, responseComment)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return responseComment, resp, nil
+}
+
+// UpdateComment wraps UpdateCommentWithContext using the background context.
+func (s *IssueService) UpdateComment(issueID string, comment *Comment) (*Comment, *Response, error) {
+	return s.UpdateCommentWithContext(context.Background(), issueID, comment)
+}
+
+// DeleteCommentWithContext deletes a comment from an issueID.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-api-3-issue-issueIdOrKey-comment-id-delete
+func (s *IssueService) DeleteCommentWithContext(ctx context.Context, issueID, commentID string) error {
+	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/comment/%s", issueID, commentID)
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return jerr
+	}
+
+	return nil
+}
+
+// DeleteComment wraps DeleteCommentWithContext using the background context.
+func (s *IssueService) DeleteComment(issueID, commentID string) error {
+	return s.DeleteCommentWithContext(context.Background(), issueID, commentID)
+}
+
+// AddWorklogRecordWithContext adds a new worklog record to issueID.
+// +// https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-issue-issueIdOrKey-worklog-post +func (s *IssueService) AddWorklogRecordWithContext(ctx context.Context, issueID string, record *WorklogRecord, options ...func(*http.Request) error) (*WorklogRecord, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/worklog", issueID) + req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, record) + if err != nil { + return nil, nil, err + } + + for _, option := range options { + err = option(req) + if err != nil { + return nil, nil, err + } + } + + responseRecord := new(WorklogRecord) + resp, err := s.client.Do(req, responseRecord) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return responseRecord, resp, nil +} + +// AddWorklogRecord wraps AddWorklogRecordWithContext using the background context. +func (s *IssueService) AddWorklogRecord(issueID string, record *WorklogRecord, options ...func(*http.Request) error) (*WorklogRecord, *Response, error) { + return s.AddWorklogRecordWithContext(context.Background(), issueID, record, options...) +} + +// UpdateWorklogRecordWithContext updates a worklog record. +// +// https://docs.atlassian.com/software/jira/docs/api/REST/7.1.2/#api/2/issue-updateWorklog +func (s *IssueService) UpdateWorklogRecordWithContext(ctx context.Context, issueID, worklogID string, record *WorklogRecord, options ...func(*http.Request) error) (*WorklogRecord, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/worklog/%s", issueID, worklogID) + req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndpoint, record) + if err != nil { + return nil, nil, err + } + + for _, option := range options { + err = option(req) + if err != nil { + return nil, nil, err + } + } + + responseRecord := new(WorklogRecord) + resp, err := s.client.Do(req, responseRecord) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return responseRecord, resp, nil +} + +// UpdateWorklogRecord wraps UpdateWorklogRecordWithContext using the background context. +func (s *IssueService) UpdateWorklogRecord(issueID, worklogID string, record *WorklogRecord, options ...func(*http.Request) error) (*WorklogRecord, *Response, error) { + return s.UpdateWorklogRecordWithContext(context.Background(), issueID, worklogID, record, options...) +} + +// AddLinkWithContext adds a link between two issues. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issueLink +func (s *IssueService) AddLinkWithContext(ctx context.Context, issueLink *IssueLink) (*Response, error) { + apiEndpoint := "rest/api/2/issueLink" + req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, issueLink) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + err = NewJiraError(resp, err) + } + + return resp, err +} + +// AddLink wraps AddLinkWithContext using the background context. 
+func (s *IssueService) AddLink(issueLink *IssueLink) (*Response, error) { + return s.AddLinkWithContext(context.Background(), issueLink) +} + +// SearchWithContext will search for tickets according to the jql +// +// Jira API docs: https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis/jira-rest-api-tutorials/jira-rest-api-example-query-issues +func (s *IssueService) SearchWithContext(ctx context.Context, jql string, options *SearchOptions) ([]Issue, *Response, error) { + u := url.URL{ + Path: "rest/api/2/search", + } + uv := url.Values{} + if jql != "" { + uv.Add("jql", jql) + } + + if options != nil { + if options.StartAt != 0 { + uv.Add("startAt", strconv.Itoa(options.StartAt)) + } + if options.MaxResults != 0 { + uv.Add("maxResults", strconv.Itoa(options.MaxResults)) + } + if options.Expand != "" { + uv.Add("expand", options.Expand) + } + if strings.Join(options.Fields, ",") != "" { + uv.Add("fields", strings.Join(options.Fields, ",")) + } + if options.ValidateQuery != "" { + uv.Add("validateQuery", options.ValidateQuery) + } + } + + u.RawQuery = uv.Encode() + + req, err := s.client.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return []Issue{}, nil, err + } + + v := new(searchResult) + resp, err := s.client.Do(req, v) + if err != nil { + err = NewJiraError(resp, err) + } + return v.Issues, resp, err +} + +// Search wraps SearchWithContext using the background context. +func (s *IssueService) Search(jql string, options *SearchOptions) ([]Issue, *Response, error) { + return s.SearchWithContext(context.Background(), jql, options) +} + +// SearchPagesWithContext will get issues from all pages in a search +// +// Jira API docs: https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis/jira-rest-api-tutorials/jira-rest-api-example-query-issues +func (s *IssueService) SearchPagesWithContext(ctx context.Context, jql string, options *SearchOptions, f func(Issue) error) error { + if options == nil { + options = &SearchOptions{ + StartAt: 0, + MaxResults: 50, + } + } + + if options.MaxResults == 0 { + options.MaxResults = 50 + } + + issues, resp, err := s.SearchWithContext(ctx, jql, options) + if err != nil { + return err + } + + if len(issues) == 0 { + return nil + } + + for { + for _, issue := range issues { + err = f(issue) + if err != nil { + return err + } + } + + if resp.StartAt+resp.MaxResults >= resp.Total { + return nil + } + + options.StartAt += resp.MaxResults + issues, resp, err = s.SearchWithContext(ctx, jql, options) + if err != nil { + return err + } + } +} + +// SearchPages wraps SearchPagesWithContext using the background context. 
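+//
+// A usage sketch (the JQL string is illustrative):
+//
+//	err := client.Issue.SearchPages(`project = PROJ AND status = "Done"`, nil,
+//		func(i jira.Issue) error {
+//			fmt.Println(i.Key)
+//			return nil
+//		})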
+func (s *IssueService) SearchPages(jql string, options *SearchOptions, f func(Issue) error) error { + return s.SearchPagesWithContext(context.Background(), jql, options, f) +} + +// GetCustomFieldsWithContext returns a map of customfield_* keys with string values +func (s *IssueService) GetCustomFieldsWithContext(ctx context.Context, issueID string) (CustomFields, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + issue := new(map[string]interface{}) + resp, err := s.client.Do(req, issue) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + m := *issue + f := m["fields"] + cf := make(CustomFields) + if f == nil { + return cf, resp, nil + } + + if rec, ok := f.(map[string]interface{}); ok { + for key, val := range rec { + if strings.Contains(key, "customfield") { + if valMap, ok := val.(map[string]interface{}); ok { + if v, ok := valMap["value"]; ok { + val = v + } + } + cf[key] = fmt.Sprint(val) + } + } + } + return cf, resp, nil +} + +// GetCustomFields wraps GetCustomFieldsWithContext using the background context. +func (s *IssueService) GetCustomFields(issueID string) (CustomFields, *Response, error) { + return s.GetCustomFieldsWithContext(context.Background(), issueID) +} + +// GetTransitionsWithContext gets a list of the transitions possible for this issue by the current user, +// along with fields that are required and their types. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getTransitions +func (s *IssueService) GetTransitionsWithContext(ctx context.Context, id string) ([]Transition, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions?expand=transitions.fields", id) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + result := new(transitionResult) + resp, err := s.client.Do(req, result) + if err != nil { + err = NewJiraError(resp, err) + } + return result.Transitions, resp, err +} + +// GetTransitions wraps GetTransitionsWithContext using the background context. +func (s *IssueService) GetTransitions(id string) ([]Transition, *Response, error) { + return s.GetTransitionsWithContext(context.Background(), id) +} + +// DoTransitionWithContext performs a transition on an issue. +// When performing the transition you can update or set other issue fields. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-doTransition +func (s *IssueService) DoTransitionWithContext(ctx context.Context, ticketID, transitionID string) (*Response, error) { + payload := CreateTransitionPayload{ + Transition: TransitionPayload{ + ID: transitionID, + }, + } + return s.DoTransitionWithPayloadWithContext(ctx, ticketID, payload) +} + +// DoTransition wraps DoTransitionWithContext using the background context. +func (s *IssueService) DoTransition(ticketID, transitionID string) (*Response, error) { + return s.DoTransitionWithContext(context.Background(), ticketID, transitionID) +} + +// DoTransitionWithPayloadWithContext performs a transition on an issue using any payload. +// When performing the transition you can update or set other issue fields. 
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-doTransition
+func (s *IssueService) DoTransitionWithPayloadWithContext(ctx context.Context, ticketID, payload interface{}) (*Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions", ticketID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, payload)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		err = NewJiraError(resp, err)
+	}
+
+	return resp, err
+}
+
+// DoTransitionWithPayload wraps DoTransitionWithPayloadWithContext using the background context.
+func (s *IssueService) DoTransitionWithPayload(ticketID, payload interface{}) (*Response, error) {
+	return s.DoTransitionWithPayloadWithContext(context.Background(), ticketID, payload)
+}
+
+// InitIssueWithMetaAndFields returns an Issue with values from fieldsConfig properly set.
+// * metaProject should contain the meta information about the project where the issue should be created.
+// * metaIssuetype is the meta information about the issue type that needs to be created.
+// * fieldsConfig is a key->value map where the key is the name of the field as seen in the UI
+//   and the value is the string value for that particular key.
+// Note: This method doesn't verify that the fieldsConfig is complete with mandatory fields. The fieldsConfig is
+// supposed to be already verified with MetaIssueType.CheckCompleteAndAvailable. It will, however, return an
+// error if a key is not found.
+// All values will be packed into Unknowns. This is more convenient. If the struct fields need to be
+// configured as well, marshalling and unmarshalling will set the proper fields.
+func InitIssueWithMetaAndFields(metaProject *MetaProject, metaIssuetype *MetaIssueType, fieldsConfig map[string]string) (*Issue, error) {
+	issue := new(Issue)
+	issueFields := new(IssueFields)
+	issueFields.Unknowns = tcontainer.NewMarshalMap()
+
+	// map the field names the user presented to Jira's internal key
+	allFields, _ := metaIssuetype.GetAllFields()
+	for key, value := range fieldsConfig {
+		jiraKey, found := allFields[key]
+		if !found {
+			return nil, fmt.Errorf("key %s is not found in the list of fields", key)
+		}
+
+		valueType, err := metaIssuetype.Fields.String(jiraKey + "/schema/type")
+		if err != nil {
+			return nil, err
+		}
+		switch valueType {
+		case "array":
+			elemType, err := metaIssuetype.Fields.String(jiraKey + "/schema/items")
+			if err != nil {
+				return nil, err
+			}
+			switch elemType {
+			case "component":
+				issueFields.Unknowns[jiraKey] = []Component{{Name: value}}
+			case "option":
+				issueFields.Unknowns[jiraKey] = []map[string]string{{"value": value}}
+			default:
+				issueFields.Unknowns[jiraKey] = []string{value}
+			}
+		case "string":
+			issueFields.Unknowns[jiraKey] = value
+		case "date":
+			issueFields.Unknowns[jiraKey] = value
+		case "datetime":
+			issueFields.Unknowns[jiraKey] = value
+		case "any":
+			// Treat any as string
+			issueFields.Unknowns[jiraKey] = value
+		case "project":
+			issueFields.Unknowns[jiraKey] = Project{
+				Name: metaProject.Name,
+				ID:   metaProject.Id,
+			}
+		case "priority":
+			issueFields.Unknowns[jiraKey] = Priority{Name: value}
+		case "user":
+			issueFields.Unknowns[jiraKey] = User{
+				Name: value,
+			}
+		case "issuetype":
+			issueFields.Unknowns[jiraKey] = IssueType{
+				Name: value,
+			}
+		case "option":
+			issueFields.Unknowns[jiraKey] = Option{
+				Value: value,
+			}
+		default:
+			return nil, fmt.Errorf("unknown issue type encountered: %s for %s", valueType, key)
+		}
+	}
+
+	issue.Fields = issueFields
+
+	return issue, nil
+}
+
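+// A sketch of the intended flow, using the meta helpers defined in
+// metaissue.go (the project key, issue type, and field name are placeholders):
+//
+//	meta, _, _ := client.Issue.GetCreateMeta("PROJ")
+//	project := meta.GetProjectWithKey("PROJ")
+//	issueType := project.GetIssueTypeWithName("Task")
+//	issue, err := jira.InitIssueWithMetaAndFields(project, issueType,
+//		map[string]string{"Summary": "Created from meta"})
+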
+// DeleteWithContext will delete a specified issue.
+func (s *IssueService) DeleteWithContext(ctx context.Context, issueID string) (*Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)
+
+	// to enable deletion of subtasks; without this, the request will fail if the issue has subtasks
+	deletePayload := make(map[string]interface{})
+	deletePayload["deleteSubtasks"] = "true"
+	content, _ := json.Marshal(deletePayload)
+
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, content)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	return resp, err
+}
+
+// Delete wraps DeleteWithContext using the background context.
+func (s *IssueService) Delete(issueID string) (*Response, error) {
+	return s.DeleteWithContext(context.Background(), issueID)
+}
+
+// GetWatchersWithContext will return all the users watching/observing the given issue
+//
+// Jira API docs: https://docs.atlassian.com/software/jira/docs/api/REST/latest/#api/2/issue-getIssueWatchers
+func (s *IssueService) GetWatchersWithContext(ctx context.Context, issueID string) (*[]User, *Response, error) {
+	watchesAPIEndpoint := fmt.Sprintf("rest/api/2/issue/%s/watchers", issueID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", watchesAPIEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	watches := new(Watches)
+	resp, err := s.client.Do(req, watches)
+	if err != nil {
+		return nil, nil, NewJiraError(resp, err)
+	}
+
+	result := []User{}
+	for _, watcher := range watches.Watchers {
+		var user *User
+		if watcher.AccountID != "" {
+			user, resp, err = s.client.User.GetByAccountID(watcher.AccountID)
+			if err != nil {
+				return nil, resp, NewJiraError(resp, err)
+			}
+		}
+		if user == nil {
+			// Watchers without an AccountID cannot be resolved to a full user;
+			// skip them instead of dereferencing a nil pointer.
+			continue
+		}
+		result = append(result, *user)
+	}
+
+	return &result, resp, nil
+}
+
+// GetWatchers wraps GetWatchersWithContext using the background context.
+func (s *IssueService) GetWatchers(issueID string) (*[]User, *Response, error) {
+	return s.GetWatchersWithContext(context.Background(), issueID)
+}
+
+// AddWatcherWithContext adds a watcher to the given issue
+//
+// Jira API docs: https://docs.atlassian.com/software/jira/docs/api/REST/latest/#api/2/issue-addWatcher
+func (s *IssueService) AddWatcherWithContext(ctx context.Context, issueID string, userName string) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/api/2/issue/%s/watchers", issueID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndPoint, userName)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		err = NewJiraError(resp, err)
+	}
+
+	return resp, err
+}
+
+// AddWatcher wraps AddWatcherWithContext using the background context.
+func (s *IssueService) AddWatcher(issueID string, userName string) (*Response, error) { + return s.AddWatcherWithContext(context.Background(), issueID, userName) +} + +// RemoveWatcherWithContext removes given user from given issue +// +// Jira API docs: https://docs.atlassian.com/software/jira/docs/api/REST/latest/#api/2/issue-removeWatcher +func (s *IssueService) RemoveWatcherWithContext(ctx context.Context, issueID string, userName string) (*Response, error) { + apiEndPoint := fmt.Sprintf("rest/api/2/issue/%s/watchers", issueID) + + req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndPoint, userName) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + err = NewJiraError(resp, err) + } + + return resp, err +} + +// RemoveWatcher wraps RemoveWatcherWithContext using the background context. +func (s *IssueService) RemoveWatcher(issueID string, userName string) (*Response, error) { + return s.RemoveWatcherWithContext(context.Background(), issueID, userName) +} + +// UpdateAssigneeWithContext updates the user assigned to work on the given issue +// +// Jira API docs: https://docs.atlassian.com/software/jira/docs/api/REST/7.10.2/#api/2/issue-assign +func (s *IssueService) UpdateAssigneeWithContext(ctx context.Context, issueID string, assignee *User) (*Response, error) { + apiEndPoint := fmt.Sprintf("rest/api/2/issue/%s/assignee", issueID) + + req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndPoint, assignee) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + if err != nil { + err = NewJiraError(resp, err) + } + + return resp, err +} + +// UpdateAssignee wraps UpdateAssigneeWithContext using the background context. +func (s *IssueService) UpdateAssignee(issueID string, assignee *User) (*Response, error) { + return s.UpdateAssigneeWithContext(context.Background(), issueID, assignee) +} + +func (c ChangelogHistory) CreatedTime() (time.Time, error) { + var t time.Time + // Ignore null + if string(c.Created) == "null" { + return t, nil + } + t, err := time.Parse("2006-01-02T15:04:05.999-0700", c.Created) + return t, err +} + +// GetRemoteLinksWithContext gets remote issue links on the issue. +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getRemoteIssueLinks +func (s *IssueService) GetRemoteLinksWithContext(ctx context.Context, id string) (*[]RemoteLink, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/remotelink", id) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + result := new([]RemoteLink) + resp, err := s.client.Do(req, result) + if err != nil { + err = NewJiraError(resp, err) + } + return result, resp, err +} + +// GetRemoteLinks wraps GetRemoteLinksWithContext using the background context. +func (s *IssueService) GetRemoteLinks(id string) (*[]RemoteLink, *Response, error) { + return s.GetRemoteLinksWithContext(context.Background(), id) +} + +// AddRemoteLinkWithContext adds a remote link to issueID. 
+// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-issue-issueIdOrKey-remotelink-post +func (s *IssueService) AddRemoteLinkWithContext(ctx context.Context, issueID string, remotelink *RemoteLink) (*RemoteLink, *Response, error) { + apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/remotelink", issueID) + req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, remotelink) + if err != nil { + return nil, nil, err + } + + responseRemotelink := new(RemoteLink) + resp, err := s.client.Do(req, responseRemotelink) + if err != nil { + jerr := NewJiraError(resp, err) + return nil, resp, jerr + } + + return responseRemotelink, resp, nil +} + +// AddRemoteLink wraps AddRemoteLinkWithContext using the background context. +func (s *IssueService) AddRemoteLink(issueID string, remotelink *RemoteLink) (*RemoteLink, *Response, error) { + return s.AddRemoteLinkWithContext(context.Background(), issueID, remotelink) +} diff --git a/vendor/github.com/andygrunwald/go-jira/issuelinktype.go b/vendor/github.com/andygrunwald/go-jira/issuelinktype.go new file mode 100644 index 00000000000..92076055bde --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/issuelinktype.go @@ -0,0 +1,137 @@ +package jira + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" +) + +// IssueLinkTypeService handles issue link types for the Jira instance / API. +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-group-Issue-link-types +type IssueLinkTypeService struct { + client *Client +} + +// GetListWithContext gets all of the issue link types from Jira. +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-issueLinkType-get +func (s *IssueLinkTypeService) GetListWithContext(ctx context.Context) ([]IssueLinkType, *Response, error) { + apiEndpoint := "rest/api/2/issueLinkType" + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + linkTypeList := []IssueLinkType{} + resp, err := s.client.Do(req, &linkTypeList) + if err != nil { + return nil, resp, NewJiraError(resp, err) + } + return linkTypeList, resp, nil +} + +// GetList wraps GetListWithContext using the background context. +func (s *IssueLinkTypeService) GetList() ([]IssueLinkType, *Response, error) { + return s.GetListWithContext(context.Background()) +} + +// GetWithContext gets info of a specific issue link type from Jira. +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-issueLinkType-issueLinkTypeId-get +func (s *IssueLinkTypeService) GetWithContext(ctx context.Context, ID string) (*IssueLinkType, *Response, error) { + apiEndPoint := fmt.Sprintf("rest/api/2/issueLinkType/%s", ID) + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil) + if err != nil { + return nil, nil, err + } + + linkType := new(IssueLinkType) + resp, err := s.client.Do(req, linkType) + if err != nil { + return nil, resp, NewJiraError(resp, err) + } + return linkType, resp, nil +} + +// Get wraps GetWithContext using the background context. +func (s *IssueLinkTypeService) Get(ID string) (*IssueLinkType, *Response, error) { + return s.GetWithContext(context.Background(), ID) +} + +// CreateWithContext creates an issue link type in Jira. 
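+// For example (the names describe a hypothetical link type):
+//
+//	lt := &jira.IssueLinkType{
+//		Name:    "Duplicate",
+//		Inward:  "Duplicated by",
+//		Outward: "Duplicates",
+//	}
+//	created, _, err := client.IssueLinkType.Create(lt)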
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-issueLinkType-post
+func (s *IssueLinkTypeService) CreateWithContext(ctx context.Context, linkType *IssueLinkType) (*IssueLinkType, *Response, error) {
+	apiEndpoint := "/rest/api/2/issueLinkType"
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, linkType)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	responseLinkType := new(IssueLinkType)
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		e := fmt.Errorf("could not read the returned data")
+		return nil, resp, NewJiraError(resp, e)
+	}
+	err = json.Unmarshal(data, responseLinkType)
+	if err != nil {
+		e := fmt.Errorf("could not unmarshal the data into struct")
+		return nil, resp, NewJiraError(resp, e)
+	}
+	// Return the link type as parsed from the server response, not the input.
+	return responseLinkType, resp, nil
+}
+
+// Create wraps CreateWithContext using the background context.
+func (s *IssueLinkTypeService) Create(linkType *IssueLinkType) (*IssueLinkType, *Response, error) {
+	return s.CreateWithContext(context.Background(), linkType)
+}
+
+// UpdateWithContext updates an issue link type. The link type is found by its ID.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-issueLinkType-issueLinkTypeId-put
+func (s *IssueLinkTypeService) UpdateWithContext(ctx context.Context, linkType *IssueLinkType) (*IssueLinkType, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/issueLinkType/%s", linkType.ID)
+	req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndpoint, linkType)
+	if err != nil {
+		return nil, nil, err
+	}
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	ret := *linkType
+	return &ret, resp, nil
+}
+
+// Update wraps UpdateWithContext using the background context.
+func (s *IssueLinkTypeService) Update(linkType *IssueLinkType) (*IssueLinkType, *Response, error) {
+	return s.UpdateWithContext(context.Background(), linkType)
+}
+
+// DeleteWithContext deletes an issue link type based on provided ID.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-issueLinkType-issueLinkTypeId-delete
+func (s *IssueLinkTypeService) DeleteWithContext(ctx context.Context, ID string) (*Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/issueLinkType/%s", ID)
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	return resp, err
+}
+
+// Delete wraps DeleteWithContext using the background context.
+func (s *IssueLinkTypeService) Delete(ID string) (*Response, error) { + return s.DeleteWithContext(context.Background(), ID) +} diff --git a/vendor/github.com/andygrunwald/go-jira/jira.go b/vendor/github.com/andygrunwald/go-jira/jira.go new file mode 100644 index 00000000000..7d7702bbdd7 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/jira.go @@ -0,0 +1,565 @@ +package jira + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "sort" + "strings" + "time" + + "github.com/golang-jwt/jwt" + "github.com/google/go-querystring/query" + "github.com/pkg/errors" +) + +// httpClient defines an interface for an http.Client implementation so that alternative +// http Clients can be passed in for making requests +type httpClient interface { + Do(request *http.Request) (response *http.Response, err error) +} + +// A Client manages communication with the Jira API. +type Client struct { + // HTTP client used to communicate with the API. + client httpClient + + // Base URL for API requests. + baseURL *url.URL + + // Session storage if the user authenticates with a Session cookie + session *Session + + // Services used for talking to different parts of the Jira API. + Authentication *AuthenticationService + Issue *IssueService + Project *ProjectService + Board *BoardService + Sprint *SprintService + User *UserService + Group *GroupService + Version *VersionService + Priority *PriorityService + Field *FieldService + Component *ComponentService + Resolution *ResolutionService + StatusCategory *StatusCategoryService + Filter *FilterService + Role *RoleService + PermissionScheme *PermissionSchemeService + Status *StatusService + IssueLinkType *IssueLinkTypeService + Organization *OrganizationService + ServiceDesk *ServiceDeskService +} + +// NewClient returns a new Jira API client. +// If a nil httpClient is provided, http.DefaultClient will be used. +// To use API methods which require authentication you can follow the preferred solution and +// provide an http.Client that will perform the authentication for you with OAuth and HTTP Basic (such as that provided by the golang.org/x/oauth2 library). +// As an alternative you can use Session Cookie based authentication provided by this package as well. +// See https://docs.atlassian.com/jira/REST/latest/#authentication +// baseURL is the HTTP endpoint of your Jira instance and should always be specified with a trailing slash. 
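+//
+// A minimal construction sketch using the BasicAuthTransport defined in this
+// package (the URL and credentials are placeholders):
+//
+//	tp := jira.BasicAuthTransport{Username: "user", Password: "secret"}
+//	client, err := jira.NewClient(tp.Client(), "https://jira.example.com/")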
+func NewClient(httpClient httpClient, baseURL string) (*Client, error) { + if httpClient == nil { + httpClient = http.DefaultClient + } + + // ensure the baseURL contains a trailing slash so that all paths are preserved in later calls + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + + parsedBaseURL, err := url.Parse(baseURL) + if err != nil { + return nil, err + } + + c := &Client{ + client: httpClient, + baseURL: parsedBaseURL, + } + c.Authentication = &AuthenticationService{client: c} + c.Issue = &IssueService{client: c} + c.Project = &ProjectService{client: c} + c.Board = &BoardService{client: c} + c.Sprint = &SprintService{client: c} + c.User = &UserService{client: c} + c.Group = &GroupService{client: c} + c.Version = &VersionService{client: c} + c.Priority = &PriorityService{client: c} + c.Field = &FieldService{client: c} + c.Component = &ComponentService{client: c} + c.Resolution = &ResolutionService{client: c} + c.StatusCategory = &StatusCategoryService{client: c} + c.Filter = &FilterService{client: c} + c.Role = &RoleService{client: c} + c.PermissionScheme = &PermissionSchemeService{client: c} + c.Status = &StatusService{client: c} + c.IssueLinkType = &IssueLinkTypeService{client: c} + c.Organization = &OrganizationService{client: c} + c.ServiceDesk = &ServiceDeskService{client: c} + + return c, nil +} + +// NewRawRequestWithContext creates an API request. +// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client. +// Allows using an optional native io.Reader for sourcing the request body. +func (c *Client) NewRawRequestWithContext(ctx context.Context, method, urlStr string, body io.Reader) (*http.Request, error) { + rel, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + // Relative URLs should be specified without a preceding slash since baseURL will have the trailing slash + rel.Path = strings.TrimLeft(rel.Path, "/") + + u := c.baseURL.ResolveReference(rel) + + req, err := newRequestWithContext(ctx, method, u.String(), body) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + + // Set authentication information + if c.Authentication.authType == authTypeSession { + // Set session cookie if there is one + if c.session != nil { + for _, cookie := range c.session.Cookies { + req.AddCookie(cookie) + } + } + } else if c.Authentication.authType == authTypeBasic { + // Set basic auth information + if c.Authentication.username != "" { + req.SetBasicAuth(c.Authentication.username, c.Authentication.password) + } + } + + return req, nil +} + +// NewRawRequest wraps NewRawRequestWithContext using the background context. +func (c *Client) NewRawRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + return c.NewRawRequestWithContext(context.Background(), method, urlStr, body) +} + +// NewRequestWithContext creates an API request. +// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client. +// If specified, the value pointed to by body is JSON encoded and included as the request body. 
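+//
+// For example (the endpoint is illustrative):
+//
+//	req, err := client.NewRequestWithContext(ctx, "GET", "rest/api/2/myself", nil)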
+func (c *Client) NewRequestWithContext(ctx context.Context, method, urlStr string, body interface{}) (*http.Request, error) { + rel, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + // Relative URLs should be specified without a preceding slash since baseURL will have the trailing slash + rel.Path = strings.TrimLeft(rel.Path, "/") + + u := c.baseURL.ResolveReference(rel) + + var buf io.ReadWriter + if body != nil { + buf = new(bytes.Buffer) + err = json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := newRequestWithContext(ctx, method, u.String(), buf) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + + // Set authentication information + if c.Authentication.authType == authTypeSession { + // Set session cookie if there is one + if c.session != nil { + for _, cookie := range c.session.Cookies { + req.AddCookie(cookie) + } + } + } else if c.Authentication.authType == authTypeBasic { + // Set basic auth information + if c.Authentication.username != "" { + req.SetBasicAuth(c.Authentication.username, c.Authentication.password) + } + } + + return req, nil +} + +// NewRequest wraps NewRequestWithContext using the background context. +func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { + return c.NewRequestWithContext(context.Background(), method, urlStr, body) +} + +// addOptions adds the parameters in opt as URL query parameters to s. opt +// must be a struct whose fields may contain "url" tags. +func addOptions(s string, opt interface{}) (string, error) { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Ptr && v.IsNil() { + return s, nil + } + + u, err := url.Parse(s) + if err != nil { + return s, err + } + + qs, err := query.Values(opt) + if err != nil { + return s, err + } + + u.RawQuery = qs.Encode() + return u.String(), nil +} + +// NewMultiPartRequestWithContext creates an API request including a multi-part file. +// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client. +// If specified, the value pointed to by buf is a multipart form. +func (c *Client) NewMultiPartRequestWithContext(ctx context.Context, method, urlStr string, buf *bytes.Buffer) (*http.Request, error) { + rel, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + // Relative URLs should be specified without a preceding slash since baseURL will have the trailing slash + rel.Path = strings.TrimLeft(rel.Path, "/") + + u := c.baseURL.ResolveReference(rel) + + req, err := newRequestWithContext(ctx, method, u.String(), buf) + if err != nil { + return nil, err + } + + // Set required headers + req.Header.Set("X-Atlassian-Token", "nocheck") + + // Set authentication information + if c.Authentication.authType == authTypeSession { + // Set session cookie if there is one + if c.session != nil { + for _, cookie := range c.session.Cookies { + req.AddCookie(cookie) + } + } + } else if c.Authentication.authType == authTypeBasic { + // Set basic auth information + if c.Authentication.username != "" { + req.SetBasicAuth(c.Authentication.username, c.Authentication.password) + } + } + + return req, nil +} + +// NewMultiPartRequest wraps NewMultiPartRequestWithContext using the background context. 
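+//
+// A sketch of preparing the multipart body; the "file" form field and the
+// issue key are assumptions for illustration:
+//
+//	var buf bytes.Buffer
+//	w := multipart.NewWriter(&buf)
+//	fw, _ := w.CreateFormFile("file", "notes.txt")
+//	io.Copy(fw, strings.NewReader("hello"))
+//	w.Close()
+//	req, _ := client.NewMultiPartRequest("POST", "rest/api/2/issue/PROJ-1/attachments", &buf)
+//	req.Header.Set("Content-Type", w.FormDataContentType())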
+func (c *Client) NewMultiPartRequest(method, urlStr string, buf *bytes.Buffer) (*http.Request, error) {
+	return c.NewMultiPartRequestWithContext(context.Background(), method, urlStr, buf)
+}
+
+// Do sends an API request and returns the API response.
+// The API response is JSON decoded and stored in the value pointed to by v, or returned as an error if an API error has occurred.
+func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
+	httpResp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	err = CheckResponse(httpResp)
+	if err != nil {
+		// Even though there was an error, we still return the response
+		// in case the caller wants to inspect it further
+		return newResponse(httpResp, nil), err
+	}
+
+	if v != nil {
+		// Open a NewDecoder and defer closing the reader only if there is a provided interface to decode to
+		defer httpResp.Body.Close()
+		err = json.NewDecoder(httpResp.Body).Decode(v)
+	}
+
+	resp := newResponse(httpResp, v)
+	return resp, err
+}
+
+// CheckResponse checks the API response for errors, and returns them if present.
+// A response is considered an error if it has a status code outside the 200 range.
+// The caller is responsible for analyzing the response body.
+// The body can contain JSON (if the error is intended) or XML (sometimes Jira just fails).
+func CheckResponse(r *http.Response) error {
+	if c := r.StatusCode; 200 <= c && c <= 299 {
+		return nil
+	}
+
+	err := fmt.Errorf("request failed. Please analyze the response body for more details. Status code: %d", r.StatusCode)
+	return err
+}
+
+// GetBaseURL returns the base URL of the client.
+// This is the same URL as passed to the NewClient constructor.
+func (c *Client) GetBaseURL() url.URL {
+	return *c.baseURL
+}
+
+// Response represents a Jira API response. It wraps the http.Response returned from
+// the API and provides information about paging.
+type Response struct {
+	*http.Response
+
+	StartAt    int
+	MaxResults int
+	Total      int
+}
+
+func newResponse(r *http.Response, v interface{}) *Response {
+	resp := &Response{Response: r}
+	resp.populatePageValues(v)
+	return resp
+}
+
+// Sets paging values if response json was parsed to searchResult type
+// (can be extended with other types if they also need paging info)
+func (r *Response) populatePageValues(v interface{}) {
+	switch value := v.(type) {
+	case *searchResult:
+		r.StartAt = value.StartAt
+		r.MaxResults = value.MaxResults
+		r.Total = value.Total
+	case *groupMembersResult:
+		r.StartAt = value.StartAt
+		r.MaxResults = value.MaxResults
+		r.Total = value.Total
+	}
+}
+
+// BasicAuthTransport is an http.RoundTripper that authenticates all requests
+// using HTTP Basic Authentication with the provided username and password.
+type BasicAuthTransport struct {
+	Username string
+	Password string
+
+	// Transport is the underlying HTTP transport to use when making requests.
+	// It will default to http.DefaultTransport if nil.
+	Transport http.RoundTripper
+}
+
+// RoundTrip implements the RoundTripper interface. We just add the
+// basic auth and return the RoundTripper for this transport type.
+func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req2 := cloneRequest(req) // per RoundTripper contract
+
+	req2.SetBasicAuth(t.Username, t.Password)
+	return t.transport().RoundTrip(req2)
+}
+
+// Client returns an *http.Client that makes requests that are authenticated
+// using HTTP Basic Authentication.
This is a nice little bit of sugar +// so we can just get the client instead of creating the client in the calling code. +// If it's necessary to send more information on client init, the calling code can +// always skip this and set the transport itself. +func (t *BasicAuthTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *BasicAuthTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// CookieAuthTransport is an http.RoundTripper that authenticates all requests +// using Jira's cookie-based authentication. +// +// Note that it is generally preferable to use HTTP BASIC authentication with the REST API. +// However, this resource may be used to mimic the behaviour of Jira's log-in page (e.g. to display log-in errors to a user). +// +// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#auth/1/session +type CookieAuthTransport struct { + Username string + Password string + AuthURL string + + // SessionObject is the authenticated cookie string.s + // It's passed in each call to prove the client is authenticated. + SessionObject []*http.Cookie + + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper +} + +// RoundTrip adds the session object to the request. +func (t *CookieAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.SessionObject == nil { + err := t.setSessionObject() + if err != nil { + return nil, errors.Wrap(err, "cookieauth: no session object has been set") + } + } + + req2 := cloneRequest(req) // per RoundTripper contract + for _, cookie := range t.SessionObject { + // Don't add an empty value cookie to the request + if cookie.Value != "" { + req2.AddCookie(cookie) + } + } + + return t.transport().RoundTrip(req2) +} + +// Client returns an *http.Client that makes requests that are authenticated +// using cookie authentication +func (t *CookieAuthTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// setSessionObject attempts to authenticate the user and set +// the session object (e.g. cookie) +func (t *CookieAuthTransport) setSessionObject() error { + req, err := t.buildAuthRequest() + if err != nil { + return err + } + + var authClient = &http.Client{ + Timeout: time.Second * 60, + } + resp, err := authClient.Do(req) + if err != nil { + return err + } + + t.SessionObject = resp.Cookies() + return nil +} + +// getAuthRequest assembles the request to get the authenticated cookie +func (t *CookieAuthTransport) buildAuthRequest() (*http.Request, error) { + body := struct { + Username string `json:"username"` + Password string `json:"password"` + }{ + t.Username, + t.Password, + } + + b := new(bytes.Buffer) + json.NewEncoder(b).Encode(body) + + req, err := http.NewRequest("POST", t.AuthURL, b) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + return req, nil +} + +func (t *CookieAuthTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// JWTAuthTransport is an http.RoundTripper that authenticates all requests +// using Jira's JWT based authentication. +// +// NOTE: this form of auth should be used by add-ons installed from the Atlassian marketplace. 
+// +// Jira docs: https://developer.atlassian.com/cloud/jira/platform/understanding-jwt +// Examples in other languages: +// https://bitbucket.org/atlassian/atlassian-jwt-ruby/src/d44a8e7a4649e4f23edaa784402655fda7c816ea/lib/atlassian/jwt.rb +// https://bitbucket.org/atlassian/atlassian-jwt-py/src/master/atlassian_jwt/url_utils.py +type JWTAuthTransport struct { + Secret []byte + Issuer string + + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper +} + +func (t *JWTAuthTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *JWTAuthTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// RoundTrip adds the session object to the request. +func (t *JWTAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) // per RoundTripper contract + exp := time.Duration(59) * time.Second + qsh := t.createQueryStringHash(req.Method, req2.URL) + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "iss": t.Issuer, + "iat": time.Now().Unix(), + "exp": time.Now().Add(exp).Unix(), + "qsh": qsh, + }) + + jwtStr, err := token.SignedString(t.Secret) + if err != nil { + return nil, errors.Wrap(err, "jwtAuth: error signing JWT") + } + + req2.Header.Set("Authorization", fmt.Sprintf("JWT %s", jwtStr)) + return t.transport().RoundTrip(req2) +} + +func (t *JWTAuthTransport) createQueryStringHash(httpMethod string, jiraURL *url.URL) string { + canonicalRequest := t.canonicalizeRequest(httpMethod, jiraURL) + h := sha256.Sum256([]byte(canonicalRequest)) + return hex.EncodeToString(h[:]) +} + +func (t *JWTAuthTransport) canonicalizeRequest(httpMethod string, jiraURL *url.URL) string { + path := "/" + strings.Replace(strings.Trim(jiraURL.Path, "/"), "&", "%26", -1) + + var canonicalQueryString []string + for k, v := range jiraURL.Query() { + if k == "jwt" { + continue + } + param := url.QueryEscape(k) + value := url.QueryEscape(strings.Join(v, "")) + canonicalQueryString = append(canonicalQueryString, strings.Replace(strings.Join([]string{param, value}, "="), "+", "%20", -1)) + } + sort.Strings(canonicalQueryString) + return fmt.Sprintf("%s&%s&%s", strings.ToUpper(httpMethod), path, strings.Join(canonicalQueryString, "&")) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} diff --git a/vendor/github.com/andygrunwald/go-jira/metaissue.go b/vendor/github.com/andygrunwald/go-jira/metaissue.go new file mode 100644 index 00000000000..560a2f05a92 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/metaissue.go @@ -0,0 +1,234 @@ +package jira + +import ( + "context" + "fmt" + "strings" + + "github.com/google/go-querystring/query" + "github.com/trivago/tgo/tcontainer" +) + +// CreateMetaInfo contains information about fields and their attributed to create a ticket. +type CreateMetaInfo struct { + Expand string `json:"expand,omitempty"` + Projects []*MetaProject `json:"projects,omitempty"` +} + +// EditMetaInfo contains information about fields and their attributed to edit a ticket. 
+type EditMetaInfo struct {
+	Fields tcontainer.MarshalMap `json:"fields,omitempty"`
+}
+
+// MetaProject is the meta information about a project returned from the createmeta API
+type MetaProject struct {
+	Expand string `json:"expand,omitempty"`
+	Self   string `json:"self,omitempty"`
+	Id     string `json:"id,omitempty"`
+	Key    string `json:"key,omitempty"`
+	Name   string `json:"name,omitempty"`
+	// omitted avatarUrls
+	IssueTypes []*MetaIssueType `json:"issuetypes,omitempty"`
+}
+
+// MetaIssueType represents the different issue types a project has.
+//
+// Note: Fields is an interface because this is an object which can
+// have arbitrary keys related to custom fields. It is not possible to
+// enumerate these in a general way, so a map is returned.
+// Further processing must be done depending on what is required.
+type MetaIssueType struct {
+	Self        string                `json:"self,omitempty"`
+	Id          string                `json:"id,omitempty"`
+	Description string                `json:"description,omitempty"`
+	IconUrl     string                `json:"iconurl,omitempty"`
+	Name        string                `json:"name,omitempty"`
+	Subtasks    bool                  `json:"subtask,omitempty"`
+	Expand      string                `json:"expand,omitempty"`
+	Fields      tcontainer.MarshalMap `json:"fields,omitempty"`
+}
+
+// GetCreateMetaWithContext makes the API call to get the meta information required to create a ticket
+func (s *IssueService) GetCreateMetaWithContext(ctx context.Context, projectkeys string) (*CreateMetaInfo, *Response, error) {
+	return s.GetCreateMetaWithOptionsWithContext(ctx, &GetQueryOptions{ProjectKeys: projectkeys, Expand: "projects.issuetypes.fields"})
+}
+
+// GetCreateMeta wraps GetCreateMetaWithContext using the background context.
+func (s *IssueService) GetCreateMeta(projectkeys string) (*CreateMetaInfo, *Response, error) {
+	return s.GetCreateMetaWithContext(context.Background(), projectkeys)
+}
+
+// GetCreateMetaWithOptionsWithContext makes the API call to get the meta information without requiring a projectKey
+func (s *IssueService) GetCreateMetaWithOptionsWithContext(ctx context.Context, options *GetQueryOptions) (*CreateMetaInfo, *Response, error) {
+	apiEndpoint := "rest/api/2/issue/createmeta"
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	if options != nil {
+		q, err := query.Values(options)
+		if err != nil {
+			return nil, nil, err
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	meta := new(CreateMetaInfo)
+	resp, err := s.client.Do(req, meta)
+
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return meta, resp, nil
+}
+
+// GetCreateMetaWithOptions wraps GetCreateMetaWithOptionsWithContext using the background context.
+func (s *IssueService) GetCreateMetaWithOptions(options *GetQueryOptions) (*CreateMetaInfo, *Response, error) {
+	return s.GetCreateMetaWithOptionsWithContext(context.Background(), options)
+}
+
+// GetEditMetaWithContext makes the API call to get the edit meta information for an issue
+func (s *IssueService) GetEditMetaWithContext(ctx context.Context, issue *Issue) (*EditMetaInfo, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/issue/%s/editmeta", issue.Key)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	meta := new(EditMetaInfo)
+	resp, err := s.client.Do(req, meta)
+
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return meta, resp, nil
+}
+
+// GetEditMeta wraps GetEditMetaWithContext using the background context.
+func (s *IssueService) GetEditMeta(issue *Issue) (*EditMetaInfo, *Response, error) {
+	return s.GetEditMetaWithContext(context.Background(), issue)
+}
+
+// GetProjectWithName returns a project with the given name from the meta information received. If not found, this returns nil.
+// The comparison of the name is case insensitive.
+func (m *CreateMetaInfo) GetProjectWithName(name string) *MetaProject {
+	for _, m := range m.Projects {
+		if strings.EqualFold(m.Name, name) {
+			return m
+		}
+	}
+	return nil
+}
+
+// GetProjectWithKey returns a project with the given key from the meta information received. If not found, this returns nil.
+// The comparison of the key is case insensitive.
+func (m *CreateMetaInfo) GetProjectWithKey(key string) *MetaProject {
+	for _, m := range m.Projects {
+		if strings.EqualFold(m.Key, key) {
+			return m
+		}
+	}
+	return nil
+}
+
+// GetIssueTypeWithName returns an IssueType with the given name from a given MetaProject. If not found, this returns nil.
+// The comparison of the name is case insensitive.
+func (p *MetaProject) GetIssueTypeWithName(name string) *MetaIssueType {
+	for _, m := range p.IssueTypes {
+		if strings.EqualFold(m.Name, name) {
+			return m
+		}
+	}
+	return nil
+}
+
+// GetMandatoryFields returns a map of all the required fields from the MetaIssueTypes.
+// If a field returned by the API was:
+// "customfield_10806": {
+//	"required": true,
+//	"schema": {
+//		"type": "any",
+//		"custom": "com.pyxis.greenhopper.jira:gh-epic-link",
+//		"customId": 10806
+//	},
+//	"name": "Epic Link",
+//	"hasDefaultValue": false,
+//	"operations": [
+//		"set"
+//	]
+// }
+// the returned map would have "Epic Link" as the key and "customfield_10806" as the value.
+// This choice has been made so that it is easier to generate the create API request later.
+func (t *MetaIssueType) GetMandatoryFields() (map[string]string, error) {
+	ret := make(map[string]string)
+	for key := range t.Fields {
+		required, err := t.Fields.Bool(key + "/required")
+		if err != nil {
+			return nil, err
+		}
+		if required {
+			name, err := t.Fields.String(key + "/name")
+			if err != nil {
+				return nil, err
+			}
+			ret[name] = key
+		}
+	}
+	return ret, nil
+}
+
+// GetAllFields returns a map of all the fields for an IssueType. This includes both required and optional fields.
+// The key of the returned map is what you see in the form and the value is how it is represented in the Jira schema.
+func (t *MetaIssueType) GetAllFields() (map[string]string, error) {
+	ret := make(map[string]string)
+	for key := range t.Fields {
+		name, err := t.Fields.String(key + "/name")
+		if err != nil {
+			return nil, err
+		}
+		ret[name] = key
+	}
+	return ret, nil
+}
+
+// CheckCompleteAndAvailable checks if the given fields satisfy the mandatory fields required to create an issue of the given type,
+// and also if the given fields are available.
+func (t *MetaIssueType) CheckCompleteAndAvailable(config map[string]string) (bool, error) {
+	mandatory, err := t.GetMandatoryFields()
+	if err != nil {
+		return false, err
+	}
+	all, err := t.GetAllFields()
+	if err != nil {
+		return false, err
+	}
+
+	// check templateconfig against mandatory fields
+	for key := range mandatory {
+		if _, okay := config[key]; !okay {
+			var requiredFields []string
+			for name := range mandatory {
+				requiredFields = append(requiredFields, name)
+			}
+			return false, fmt.Errorf("required field not found in provided jira.fields. Required are: %#v", requiredFields)
+		}
+	}
+
+	// check templateConfig against all fields to verify they are available
+	for key := range config {
+		if _, okay := all[key]; !okay {
+			var availableFields []string
+			for name := range all {
+				availableFields = append(availableFields, name)
+			}
+			return false, fmt.Errorf("fields in jira.fields are not available in jira. Available are: %#v", availableFields)
+		}
+	}
+
+	return true, nil
+}
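+
+// A usage sketch tying these helpers together (the field names are
+// illustrative and depend on the issue type's metadata):
+//
+//	ok, err := issueType.CheckCompleteAndAvailable(map[string]string{
+//		"Summary":  "Weekly report",
+//		"Reporter": "jdoe",
+//	})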
diff --git a/vendor/github.com/andygrunwald/go-jira/organization.go b/vendor/github.com/andygrunwald/go-jira/organization.go
new file mode 100644
index 00000000000..090594c2e87
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/organization.go
@@ -0,0 +1,387 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+)
+
+// OrganizationService handles Organizations for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/
+type OrganizationService struct {
+	client *Client
+}
+
+// OrganizationCreationDTO is the DTO for the create organization API
+type OrganizationCreationDTO struct {
+	Name string `json:"name,omitempty" structs:"name,omitempty"`
+}
+
+// SelfLink stores the REST API URL of the organization.
+type SelfLink struct {
+	Self string `json:"self,omitempty" structs:"self,omitempty"`
+}
+
+// Organization contains organization data
+type Organization struct {
+	ID    string    `json:"id,omitempty" structs:"id,omitempty"`
+	Name  string    `json:"name,omitempty" structs:"name,omitempty"`
+	Links *SelfLink `json:"_links,omitempty" structs:"_links,omitempty"`
+}
+
+// OrganizationUsersDTO contains organization user IDs
+type OrganizationUsersDTO struct {
+	AccountIds []string `json:"accountIds,omitempty" structs:"accountIds,omitempty"`
+}
+
+// PagedDTO is the response of a paged list
+type PagedDTO struct {
+	Size       int           `json:"size,omitempty" structs:"size,omitempty"`
+	Start      int           `json:"start,omitempty" structs:"start,omitempty"`
+	Limit      int           `json:"limit,omitempty" structs:"limit,omitempty"`
+	IsLastPage bool          `json:"isLastPage,omitempty" structs:"isLastPage,omitempty"`
+	Values     []interface{} `json:"values,omitempty" structs:"values,omitempty"`
+	Expands    []string      `json:"_expands,omitempty" structs:"_expands,omitempty"`
+}
+
+// PropertyKey contains Property key details.
+type PropertyKey struct {
+	Self string `json:"self,omitempty" structs:"self,omitempty"`
+	Key  string `json:"key,omitempty" structs:"key,omitempty"`
+}
+
+// PropertyKeys contains an array of PropertyKey
+type PropertyKeys struct {
+	Keys []PropertyKey `json:"keys,omitempty" structs:"keys,omitempty"`
+}
+
+// GetAllOrganizationsWithContext returns a list of organizations in
+// the Jira Service Management instance.
+// Use this method when you want to present a list
+// of organizations or want to locate an organization
+// by name.
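+//
+// For example, fetching the first page (the page size is illustrative):
+//
+//	page, _, err := client.Organization.GetAllOrganizations(0, 50, "")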
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-group-organization
+func (s *OrganizationService) GetAllOrganizationsWithContext(ctx context.Context, start int, limit int, accountID string) (*PagedDTO, *Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization?start=%d&limit=%d", start, limit)
+	if accountID != "" {
+		apiEndPoint += fmt.Sprintf("&accountId=%s", accountID)
+	}
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Only touch the request after the error check; req is nil when err != nil.
+	req.Header.Set("Accept", "application/json")
+
+	v := new(PagedDTO)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return v, resp, nil
+}
+
+// GetAllOrganizations wraps GetAllOrganizationsWithContext using the background context.
+func (s *OrganizationService) GetAllOrganizations(start int, limit int, accountID string) (*PagedDTO, *Response, error) {
+	return s.GetAllOrganizationsWithContext(context.Background(), start, limit, accountID)
+}
+
+// CreateOrganizationWithContext creates an organization by
+// passing the name of the organization.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-post
+func (s *OrganizationService) CreateOrganizationWithContext(ctx context.Context, name string) (*Organization, *Response, error) {
+	apiEndPoint := "rest/servicedeskapi/organization"
+
+	organization := OrganizationCreationDTO{
+		Name: name,
+	}
+
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndPoint, organization)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	o := new(Organization)
+	resp, err := s.client.Do(req, &o)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return o, resp, nil
+}
+
+// CreateOrganization wraps CreateOrganizationWithContext using the background context.
+func (s *OrganizationService) CreateOrganization(name string) (*Organization, *Response, error) {
+	return s.CreateOrganizationWithContext(context.Background(), name)
+}
+
+// GetOrganizationWithContext returns details of an
+// organization. Use this method to get organization
+// details whenever your application component is
+// passed an organization ID but needs to display
+// other organization details.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-get
+func (s *OrganizationService) GetOrganizationWithContext(ctx context.Context, organizationID int) (*Organization, *Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d", organizationID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	o := new(Organization)
+	resp, err := s.client.Do(req, &o)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return o, resp, nil
+}
+
+// GetOrganization wraps GetOrganizationWithContext using the background context.
+func (s *OrganizationService) GetOrganization(organizationID int) (*Organization, *Response, error) {
+	return s.GetOrganizationWithContext(context.Background(), organizationID)
+}
+
+// DeleteOrganizationWithContext deletes an organization. Note that
+// DeleteOrganizationWithContext deletes an organization. Note that
+// the organization is deleted regardless
+// of other associations it may have.
+// For example, associations with service desks.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-delete
+func (s *OrganizationService) DeleteOrganizationWithContext(ctx context.Context, organizationID int) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d", organizationID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndPoint, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// DeleteOrganization wraps DeleteOrganizationWithContext using the background context.
+func (s *OrganizationService) DeleteOrganization(organizationID int) (*Response, error) {
+	return s.DeleteOrganizationWithContext(context.Background(), organizationID)
+}
+
+// GetPropertiesKeysWithContext returns the keys of
+// all properties for an organization. Use this resource
+// when you need to find out which additional property
+// items have been added to an organization.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-property-get
+func (s *OrganizationService) GetPropertiesKeysWithContext(ctx context.Context, organizationID int) (*PropertyKeys, *Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/property", organizationID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	pk := new(PropertyKeys)
+	resp, err := s.client.Do(req, &pk)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return pk, resp, nil
+}
+
+// GetPropertiesKeys wraps GetPropertiesKeysWithContext using the background context.
+func (s *OrganizationService) GetPropertiesKeys(organizationID int) (*PropertyKeys, *Response, error) {
+	return s.GetPropertiesKeysWithContext(context.Background(), organizationID)
+}
+
+// GetPropertyWithContext returns the value of a property
+// from an organization. Use this method to obtain the JSON
+// content for an organization's property.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-property-propertykey-get
+func (s *OrganizationService) GetPropertyWithContext(ctx context.Context, organizationID int, propertyKey string) (*EntityProperty, *Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/property/%s", organizationID, propertyKey)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	ep := new(EntityProperty)
+	resp, err := s.client.Do(req, &ep)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return ep, resp, nil
+}
+
+// GetProperty wraps GetPropertyWithContext using the background context.
+func (s *OrganizationService) GetProperty(organizationID int, propertyKey string) (*EntityProperty, *Response, error) {
+	return s.GetPropertyWithContext(context.Background(), organizationID, propertyKey)
+}
+
+// SetPropertyWithContext sets the value of a
+// property for an organization. Use this
+// resource to store custom data against an organization.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-property-propertykey-put
+func (s *OrganizationService) SetPropertyWithContext(ctx context.Context, organizationID int, propertyKey string) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/property/%s", organizationID, propertyKey)
+
+	req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndPoint, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// SetProperty wraps SetPropertyWithContext using the background context.
+func (s *OrganizationService) SetProperty(organizationID int, propertyKey string) (*Response, error) {
+	return s.SetPropertyWithContext(context.Background(), organizationID, propertyKey)
+}
+
+// DeletePropertyWithContext removes a property from an organization.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-property-propertykey-delete
+func (s *OrganizationService) DeletePropertyWithContext(ctx context.Context, organizationID int, propertyKey string) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/property/%s", organizationID, propertyKey)
+
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndPoint, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// DeleteProperty wraps DeletePropertyWithContext using the background context.
+func (s *OrganizationService) DeleteProperty(organizationID int, propertyKey string) (*Response, error) {
+	return s.DeletePropertyWithContext(context.Background(), organizationID, propertyKey)
+}
+
+// GetUsersWithContext returns all the users
+// associated with an organization. Use this
+// method when you want to provide a list of
+// users for an organization or determine if
+// a user is associated with an organization.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-user-get
+func (s *OrganizationService) GetUsersWithContext(ctx context.Context, organizationID int, start int, limit int) (*PagedDTO, *Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/user?start=%d&limit=%d", organizationID, start, limit)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	users := new(PagedDTO)
+	resp, err := s.client.Do(req, &users)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return users, resp, nil
+}
+
+// GetUsers wraps GetUsersWithContext using the background context.
+func (s *OrganizationService) GetUsers(organizationID int, start int, limit int) (*PagedDTO, *Response, error) {
+	return s.GetUsersWithContext(context.Background(), organizationID, start, limit)
+}
+
+// AddUsersWithContext adds users to an organization.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-user-post
+func (s *OrganizationService) AddUsersWithContext(ctx context.Context, organizationID int, users OrganizationUsersDTO) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/user", organizationID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndPoint, users)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// AddUsers wraps AddUsersWithContext using the background context.
+func (s *OrganizationService) AddUsers(organizationID int, users OrganizationUsersDTO) (*Response, error) {
+	return s.AddUsersWithContext(context.Background(), organizationID, users)
+}
+
+// RemoveUsersWithContext removes users from an organization.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-organization-organizationid-user-delete
+func (s *OrganizationService) RemoveUsersWithContext(ctx context.Context, organizationID int, users OrganizationUsersDTO) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/organization/%d/user", organizationID)
+
+	// The DELETE endpoint expects the account IDs in the request body,
+	// so pass the users DTO instead of a nil body.
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndPoint, users)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// RemoveUsers wraps RemoveUsersWithContext using the background context.
+func (s *OrganizationService) RemoveUsers(organizationID int, users OrganizationUsersDTO) (*Response, error) {
+	return s.RemoveUsersWithContext(context.Background(), organizationID, users)
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/permissionscheme.go b/vendor/github.com/andygrunwald/go-jira/permissionscheme.go
new file mode 100644
index 00000000000..7af5a8bf892
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/permissionscheme.go
@@ -0,0 +1,82 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+)
+
+// PermissionSchemeService handles permission schemes for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-group-Permissionscheme
+type PermissionSchemeService struct {
+	client *Client
+}
+
+// PermissionSchemes contains a list of permission schemes.
+type PermissionSchemes struct {
+	PermissionSchemes []PermissionScheme `json:"permissionSchemes" structs:"permissionSchemes"`
+}
+
+// Permission represents a single permission grant within a permission scheme.
+type Permission struct {
+	ID     int    `json:"id" structs:"id"`
+	Self   string `json:"self" structs:"self"`
+	Holder Holder `json:"holder" structs:"holder"`
+	Name   string `json:"permission" structs:"permission"`
+}
+
+// Holder describes the user, group, or role a permission is granted to.
+type Holder struct {
+	Type      string `json:"type" structs:"type"`
+	Parameter string `json:"parameter" structs:"parameter"`
+	Expand    string `json:"expand" structs:"expand"`
+}
+
+// GetListWithContext returns a list of all permission schemes.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-api-3-permissionscheme-get
+func (s *PermissionSchemeService) GetListWithContext(ctx context.Context) (*PermissionSchemes, *Response, error) {
+	apiEndpoint := "/rest/api/3/permissionscheme"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pss := new(PermissionSchemes)
+	resp, err := s.client.Do(req, &pss)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return pss, resp, nil
+}
+
+// GetList wraps GetListWithContext using the background context.
+func (s *PermissionSchemeService) GetList() (*PermissionSchemes, *Response, error) {
+	return s.GetListWithContext(context.Background())
+}
+
+// GetWithContext returns a full representation of the permission scheme for the schemeID.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-api-3-permissionscheme-schemeId-get
+func (s *PermissionSchemeService) GetWithContext(ctx context.Context, schemeID int) (*PermissionScheme, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/3/permissionscheme/%d", schemeID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ps := new(PermissionScheme)
+	resp, err := s.client.Do(req, ps)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+	if ps.Self == "" {
+		return nil, resp, fmt.Errorf("no permissionscheme with ID %d found", schemeID)
+	}
+
+	return ps, resp, nil
+}
+
+// Get wraps GetWithContext using the background context.
+func (s *PermissionSchemeService) Get(schemeID int) (*PermissionScheme, *Response, error) {
+	return s.GetWithContext(context.Background(), schemeID)
+}
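A short usage sketch for the permission scheme service, assuming the same hypothetical client as in the earlier example and that the Client exposes this service as client.PermissionScheme, mirroring the other services in this package:

// List every permission scheme, then fetch the first one in full.
schemes, _, err := client.PermissionScheme.GetList()
if err != nil {
	log.Fatal(err)
}
for _, s := range schemes.PermissionSchemes {
	fmt.Println(s.ID, s.Name)
}
if len(schemes.PermissionSchemes) > 0 {
	scheme, _, err := client.PermissionScheme.Get(schemes.PermissionSchemes[0].ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(scheme.Permissions), "permission grants")
}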
diff --git a/vendor/github.com/andygrunwald/go-jira/priority.go b/vendor/github.com/andygrunwald/go-jira/priority.go
new file mode 100644
index 00000000000..a7b12a4176c
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/priority.go
@@ -0,0 +1,44 @@
+package jira
+
+import "context"
+
+// PriorityService handles priorities for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-Priority
+type PriorityService struct {
+	client *Client
+}
+
+// Priority represents a priority of a Jira issue.
+// Typical types are "Normal", "Moderate", "Urgent", ...
+type Priority struct {
+	Self        string `json:"self,omitempty" structs:"self,omitempty"`
+	IconURL     string `json:"iconUrl,omitempty" structs:"iconUrl,omitempty"`
+	Name        string `json:"name,omitempty" structs:"name,omitempty"`
+	ID          string `json:"id,omitempty" structs:"id,omitempty"`
+	StatusColor string `json:"statusColor,omitempty" structs:"statusColor,omitempty"`
+	Description string `json:"description,omitempty" structs:"description,omitempty"`
+}
+
+// GetListWithContext gets all priorities from Jira.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-priority-get
+func (s *PriorityService) GetListWithContext(ctx context.Context) ([]Priority, *Response, error) {
+	apiEndpoint := "rest/api/2/priority"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	priorityList := []Priority{}
+	resp, err := s.client.Do(req, &priorityList)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return priorityList, resp, nil
+}
+
+// GetList wraps GetListWithContext using the background context.
+func (s *PriorityService) GetList() ([]Priority, *Response, error) {
+	return s.GetListWithContext(context.Background())
+}
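Priorities are a plain read; a sketch (same assumed client wiring, here client.Priority) that prints the priority ladder of an instance:

priorities, _, err := client.Priority.GetList()
if err != nil {
	log.Fatal(err)
}
for _, p := range priorities {
	fmt.Printf("%s: %s (%s)\n", p.ID, p.Name, p.StatusColor)
}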
diff --git a/vendor/github.com/andygrunwald/go-jira/project.go b/vendor/github.com/andygrunwald/go-jira/project.go
new file mode 100644
index 00000000000..f1000c81385
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/project.go
@@ -0,0 +1,182 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/google/go-querystring/query"
+)
+
+// ProjectService handles projects for the Jira instance / API.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project
+type ProjectService struct {
+	client *Client
+}
+
+// ProjectList represents a list of Projects.
+type ProjectList []struct {
+	Expand          string          `json:"expand" structs:"expand"`
+	Self            string          `json:"self" structs:"self"`
+	ID              string          `json:"id" structs:"id"`
+	Key             string          `json:"key" structs:"key"`
+	Name            string          `json:"name" structs:"name"`
+	AvatarUrls      AvatarUrls      `json:"avatarUrls" structs:"avatarUrls"`
+	ProjectTypeKey  string          `json:"projectTypeKey" structs:"projectTypeKey"`
+	ProjectCategory ProjectCategory `json:"projectCategory,omitempty" structs:"projectCategory,omitempty"`
+	IssueTypes      []IssueType     `json:"issueTypes,omitempty" structs:"issueTypes,omitempty"`
+}
+
+// ProjectCategory represents a single project category.
+type ProjectCategory struct {
+	Self        string `json:"self" structs:"self,omitempty"`
+	ID          string `json:"id" structs:"id,omitempty"`
+	Name        string `json:"name" structs:"name,omitempty"`
+	Description string `json:"description" structs:"description,omitempty"`
+}
+
+// Project represents a Jira project.
+type Project struct {
+	Expand          string             `json:"expand,omitempty" structs:"expand,omitempty"`
+	Self            string             `json:"self,omitempty" structs:"self,omitempty"`
+	ID              string             `json:"id,omitempty" structs:"id,omitempty"`
+	Key             string             `json:"key,omitempty" structs:"key,omitempty"`
+	Description     string             `json:"description,omitempty" structs:"description,omitempty"`
+	Lead            User               `json:"lead,omitempty" structs:"lead,omitempty"`
+	Components      []ProjectComponent `json:"components,omitempty" structs:"components,omitempty"`
+	IssueTypes      []IssueType        `json:"issueTypes,omitempty" structs:"issueTypes,omitempty"`
+	URL             string             `json:"url,omitempty" structs:"url,omitempty"`
+	Email           string             `json:"email,omitempty" structs:"email,omitempty"`
+	AssigneeType    string             `json:"assigneeType,omitempty" structs:"assigneeType,omitempty"`
+	Versions        []Version          `json:"versions,omitempty" structs:"versions,omitempty"`
+	Name            string             `json:"name,omitempty" structs:"name,omitempty"`
+	Roles           map[string]string  `json:"roles,omitempty" structs:"roles,omitempty"`
+	AvatarUrls      AvatarUrls         `json:"avatarUrls,omitempty" structs:"avatarUrls,omitempty"`
+	ProjectCategory ProjectCategory    `json:"projectCategory,omitempty" structs:"projectCategory,omitempty"`
+}
+
+// ProjectComponent represents a single component of a project.
+type ProjectComponent struct {
+	Self                string `json:"self" structs:"self,omitempty"`
+	ID                  string `json:"id" structs:"id,omitempty"`
+	Name                string `json:"name" structs:"name,omitempty"`
+	Description         string `json:"description" structs:"description,omitempty"`
+	Lead                User   `json:"lead,omitempty" structs:"lead,omitempty"`
+	AssigneeType        string `json:"assigneeType" structs:"assigneeType,omitempty"`
+	Assignee            User   `json:"assignee" structs:"assignee,omitempty"`
+	RealAssigneeType    string `json:"realAssigneeType" structs:"realAssigneeType,omitempty"`
+	RealAssignee        User   `json:"realAssignee" structs:"realAssignee,omitempty"`
+	IsAssigneeTypeValid bool   `json:"isAssigneeTypeValid" structs:"isAssigneeTypeValid,omitempty"`
+	Project             string `json:"project" structs:"project,omitempty"`
+	ProjectID           int    `json:"projectId" structs:"projectId,omitempty"`
+}
+
+// PermissionScheme represents the permission scheme for the project.
+type PermissionScheme struct {
+	Expand      string       `json:"expand" structs:"expand,omitempty"`
+	Self        string       `json:"self" structs:"self,omitempty"`
+	ID          int          `json:"id" structs:"id,omitempty"`
+	Name        string       `json:"name" structs:"name,omitempty"`
+	Description string       `json:"description" structs:"description,omitempty"`
+	Permissions []Permission `json:"permissions" structs:"permissions,omitempty"`
+}
+
+// GetListWithContext gets all projects from Jira.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project-getAllProjects
+func (s *ProjectService) GetListWithContext(ctx context.Context) (*ProjectList, *Response, error) {
+	return s.ListWithOptionsWithContext(ctx, &GetQueryOptions{})
+}
+
+// GetList wraps GetListWithContext using the background context.
+func (s *ProjectService) GetList() (*ProjectList, *Response, error) {
+	return s.GetListWithContext(context.Background())
+}
+
+// ListWithOptionsWithContext gets all projects from Jira with optional query params,
+// like &GetQueryOptions{Expand: "issueTypes"} to get
+// a list of all projects and their supported issue types.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project-getAllProjects
+func (s *ProjectService) ListWithOptionsWithContext(ctx context.Context, options *GetQueryOptions) (*ProjectList, *Response, error) {
+	apiEndpoint := "rest/api/2/project"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if options != nil {
+		q, err := query.Values(options)
+		if err != nil {
+			return nil, nil, err
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	projectList := new(ProjectList)
+	resp, err := s.client.Do(req, projectList)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return projectList, resp, nil
+}
+
+// ListWithOptions wraps ListWithOptionsWithContext using the background context.
+func (s *ProjectService) ListWithOptions(options *GetQueryOptions) (*ProjectList, *Response, error) {
+	return s.ListWithOptionsWithContext(context.Background(), options)
+}
+
+// GetWithContext returns a full representation of the project for the given project key.
+// Jira will attempt to identify the project by the projectIdOrKey path parameter.
+// This can be a project id or a project key.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project-getProject
+func (s *ProjectService) GetWithContext(ctx context.Context, projectID string) (*Project, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/project/%s", projectID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	project := new(Project)
+	resp, err := s.client.Do(req, project)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return project, resp, nil
+}
+
+// Get wraps GetWithContext using the background context.
+func (s *ProjectService) Get(projectID string) (*Project, *Response, error) {
+	return s.GetWithContext(context.Background(), projectID)
+}
+
+// GetPermissionSchemeWithContext returns a full representation of the permission scheme for the project.
+// Jira will attempt to identify the project by the projectIdOrKey path parameter.
+// This can be a project id or a project key.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project-getProject
+func (s *ProjectService) GetPermissionSchemeWithContext(ctx context.Context, projectID string) (*PermissionScheme, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/project/%s/permissionscheme", projectID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ps := new(PermissionScheme)
+	resp, err := s.client.Do(req, ps)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return ps, resp, nil
+}
+
+// GetPermissionScheme wraps GetPermissionSchemeWithContext using the background context.
+func (s *ProjectService) GetPermissionScheme(projectID string) (*PermissionScheme, *Response, error) {
+	return s.GetPermissionSchemeWithContext(context.Background(), projectID)
+}
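The ListWithOptionsWithContext doc comment above already names the useful knob; a sketch that expands issue types so each listed project carries its IssueTypes slice (client.Project is the assumed service field, as before):

projects, _, err := client.Project.ListWithOptions(&jira.GetQueryOptions{Expand: "issueTypes"})
if err != nil {
	log.Fatal(err)
}
// ProjectList is a slice type, so dereference before ranging.
for _, p := range *projects {
	fmt.Printf("%s (%s): %d issue types\n", p.Name, p.Key, len(p.IssueTypes))
}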
diff --git a/vendor/github.com/andygrunwald/go-jira/request_context.go b/vendor/github.com/andygrunwald/go-jira/request_context.go
new file mode 100644
index 00000000000..fc0052dfa89
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/request_context.go
@@ -0,0 +1,23 @@
+// +build go1.13
+
+// This file provides glue to use Context in `http.Request` with
+// Go version 1.13 and higher.
+
+// The function `http.NewRequestWithContext` was added in Go 1.13.
+// Before release 1.13, to use a Context we had to create an `http.Request`
+// and then call its `WithContext` method to derive a new `http.Request`
+// carrying that Context.
+//
+// Doc: https://golang.org/doc/go1.13#net/http
+
+package jira
+
+import (
+	"context"
+	"io"
+	"net/http"
+)
+
+func newRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*http.Request, error) {
+	return http.NewRequestWithContext(ctx, method, url, body)
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/request_legacy.go b/vendor/github.com/andygrunwald/go-jira/request_legacy.go
new file mode 100644
index 00000000000..5ceee8863e4
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/request_legacy.go
@@ -0,0 +1,28 @@
+// +build !go1.13
+
+// This file provides glue to use Context in `http.Request` with
+// Go versions before 1.13.
+
+// The function `http.NewRequestWithContext` was added in Go 1.13.
+// Before release 1.13, to use a Context we had to create an `http.Request`
+// and then call its `WithContext` method to derive a new `http.Request`
+// carrying that Context.
+//
+// Doc: https://golang.org/doc/go1.13#net/http
+
+package jira
+
+import (
+	"context"
+	"io"
+	"net/http"
+)
+
+func newRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*http.Request, error) {
+	r, err := http.NewRequest(method, url, body)
+	if err != nil {
+		return nil, err
+	}
+
+	return r.WithContext(ctx), nil
+}
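Because every request is built through this newRequestWithContext glue, cancelling or timing out the Context aborts the underlying HTTP call on both Go 1.13+ and older toolchains. A sketch of bounding a call with a deadline, using the ResolutionService defined next and the assumed client from the earlier examples:

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel() // always release the timer

resolutions, _, err := client.Resolution.GetListWithContext(ctx)
if err != nil {
	log.Fatal(err) // wraps context.DeadlineExceeded once the timeout elapses
}
fmt.Println(len(resolutions), "resolutions")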
diff --git a/vendor/github.com/andygrunwald/go-jira/resolution.go b/vendor/github.com/andygrunwald/go-jira/resolution.go
new file mode 100644
index 00000000000..e23d5650fbe
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/resolution.go
@@ -0,0 +1,42 @@
+package jira
+
+import "context"
+
+// ResolutionService handles resolutions for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-Resolution
+type ResolutionService struct {
+	client *Client
+}
+
+// Resolution represents a resolution of a Jira issue.
+// Typical types are "Fixed", "Suspended", "Won't Fix", ...
+type Resolution struct {
+	Self        string `json:"self" structs:"self"`
+	ID          string `json:"id" structs:"id"`
+	Description string `json:"description" structs:"description"`
+	Name        string `json:"name" structs:"name"`
+}
+
+// GetListWithContext gets all resolutions from Jira
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-resolution-get
+func (s *ResolutionService) GetListWithContext(ctx context.Context) ([]Resolution, *Response, error) {
+	apiEndpoint := "rest/api/2/resolution"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	resolutionList := []Resolution{}
+	resp, err := s.client.Do(req, &resolutionList)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return resolutionList, resp, nil
+}
+
+// GetList wraps GetListWithContext using the background context.
+func (s *ResolutionService) GetList() ([]Resolution, *Response, error) {
+	return s.GetListWithContext(context.Background())
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/role.go b/vendor/github.com/andygrunwald/go-jira/role.go
new file mode 100644
index 00000000000..66d223ff4a4
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/role.go
@@ -0,0 +1,87 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+)
+
+// RoleService handles roles for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-group-Role
+type RoleService struct {
+	client *Client
+}
+
+// Role represents a Jira product role
+type Role struct {
+	Self        string   `json:"self" structs:"self"`
+	Name        string   `json:"name" structs:"name"`
+	ID          int      `json:"id" structs:"id"`
+	Description string   `json:"description" structs:"description"`
+	Actors      []*Actor `json:"actors" structs:"actors"`
+}
+
+// Actor represents a Jira actor
+type Actor struct {
+	ID          int        `json:"id" structs:"id"`
+	DisplayName string     `json:"displayName" structs:"displayName"`
+	Type        string     `json:"type" structs:"type"`
+	Name        string     `json:"name" structs:"name"`
+	AvatarURL   string     `json:"avatarUrl" structs:"avatarUrl"`
+	ActorUser   *ActorUser `json:"actorUser" structs:"actoruser"`
+}
+
+// ActorUser contains the account id of the actor/user
+type ActorUser struct {
+	AccountID string `json:"accountId" structs:"accountId"`
+}
+
+// GetListWithContext returns a list of all available project roles
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-api-3-role-get
+func (s *RoleService) GetListWithContext(ctx context.Context) (*[]Role, *Response, error) {
+	apiEndpoint := "rest/api/3/role"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	roles := new([]Role)
+	resp, err := s.client.Do(req, roles)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+	return roles, resp, err
+}
+
+// GetList wraps GetListWithContext using the background context.
+func (s *RoleService) GetList() (*[]Role, *Response, error) {
+	return s.GetListWithContext(context.Background())
+}
+
+// GetWithContext retrieves a single Role from Jira
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-api-3-role-id-get
+func (s *RoleService) GetWithContext(ctx context.Context, roleID int) (*Role, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/3/role/%d", roleID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	role := new(Role)
+	resp, err := s.client.Do(req, role)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+	if role.Self == "" {
+		return nil, resp, fmt.Errorf("no role with ID %d found", roleID)
+	}
+
+	return role, resp, err
+}
+
+// Get wraps GetWithContext using the background context.
+func (s *RoleService) Get(roleID int) (*Role, *Response, error) {
+	return s.GetWithContext(context.Background(), roleID)
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/servicedesk.go b/vendor/github.com/andygrunwald/go-jira/servicedesk.go
new file mode 100644
index 00000000000..c6c9147366f
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/servicedesk.go
@@ -0,0 +1,114 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+)
+
+// ServiceDeskService handles ServiceDesk for the Jira instance / API.
+type ServiceDeskService struct {
+	client *Client
+}
+
+// ServiceDeskOrganizationDTO is a DTO for ServiceDesk organizations
+type ServiceDeskOrganizationDTO struct {
+	OrganizationID int `json:"organizationId,omitempty" structs:"organizationId,omitempty"`
+}
+
+// GetOrganizationsWithContext returns a list of
+// all organizations associated with a service desk.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-servicedesk-servicedeskid-organization-get
+func (s *ServiceDeskService) GetOrganizationsWithContext(ctx context.Context, serviceDeskID int, start int, limit int, accountID string) (*PagedDTO, *Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/servicedesk/%d/organization?start=%d&limit=%d", serviceDeskID, start, limit)
+	if accountID != "" {
+		apiEndPoint += fmt.Sprintf("&accountId=%s", accountID)
+	}
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndPoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+
+	orgs := new(PagedDTO)
+	resp, err := s.client.Do(req, &orgs)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return orgs, resp, nil
+}
+
+// GetOrganizations wraps GetOrganizationsWithContext using the background context.
+func (s *ServiceDeskService) GetOrganizations(serviceDeskID int, start int, limit int, accountID string) (*PagedDTO, *Response, error) {
+	return s.GetOrganizationsWithContext(context.Background(), serviceDeskID, start, limit, accountID)
+}
+
+// AddOrganizationWithContext adds an organization to
+// a service desk. If the organization ID is already
+// associated with the service desk, no change is made
+// and the resource returns a 204 success code.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-servicedesk-servicedeskid-organization-post
+func (s *ServiceDeskService) AddOrganizationWithContext(ctx context.Context, serviceDeskID int, organizationID int) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/servicedesk/%d/organization", serviceDeskID)
+
+	organization := ServiceDeskOrganizationDTO{
+		OrganizationID: organizationID,
+	}
+
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndPoint, organization)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// AddOrganization wraps AddOrganizationWithContext using the background context.
+func (s *ServiceDeskService) AddOrganization(serviceDeskID int, organizationID int) (*Response, error) {
+	return s.AddOrganizationWithContext(context.Background(), serviceDeskID, organizationID)
+}
+
+// RemoveOrganizationWithContext removes an organization
+// from a service desk. If the organization ID does not
+// match an organization associated with the service desk,
+// no change is made and the resource returns a 204 success code.
+//
+// https://developer.atlassian.com/cloud/jira/service-desk/rest/api-group-organization/#api-rest-servicedeskapi-servicedesk-servicedeskid-organization-delete
+func (s *ServiceDeskService) RemoveOrganizationWithContext(ctx context.Context, serviceDeskID int, organizationID int) (*Response, error) {
+	apiEndPoint := fmt.Sprintf("rest/servicedeskapi/servicedesk/%d/organization", serviceDeskID)
+
+	organization := ServiceDeskOrganizationDTO{
+		OrganizationID: organizationID,
+	}
+
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndPoint, organization)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return resp, jerr
+	}
+
+	return resp, nil
+}
+
+// RemoveOrganization wraps RemoveOrganizationWithContext using the background context.
+func (s *ServiceDeskService) RemoveOrganization(serviceDeskID int, organizationID int) (*Response, error) {
+	return s.RemoveOrganizationWithContext(context.Background(), serviceDeskID, organizationID)
+}
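A sketch of attaching and detaching an organization, again with the assumed client wiring (client.ServiceDesk as the service field) and placeholder IDs:

// Associate organization 42 with service desk 1. Re-adding an already
// associated organization is a no-op that still returns 204.
if _, err := client.ServiceDesk.AddOrganization(1, 42); err != nil {
	log.Fatal(err)
}

// Detach it again later.
if _, err := client.ServiceDesk.RemoveOrganization(1, 42); err != nil {
	log.Fatal(err)
}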
diff --git a/vendor/github.com/andygrunwald/go-jira/sprint.go b/vendor/github.com/andygrunwald/go-jira/sprint.go
new file mode 100644
index 00000000000..999b59c8fec
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/sprint.go
@@ -0,0 +1,123 @@
+package jira
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/google/go-querystring/query"
+)
+
+// SprintService handles sprints in the Jira Agile API.
+// See https://docs.atlassian.com/jira-software/REST/cloud/
+type SprintService struct {
+	client *Client
+}
+
+// IssuesWrapper represents a wrapper struct for moving issues to a sprint
+type IssuesWrapper struct {
+	Issues []string `json:"issues"`
+}
+
+// IssuesInSprintResult represents a wrapper struct for search results
+type IssuesInSprintResult struct {
+	Issues []Issue `json:"issues"`
+}
+
+// MoveIssuesToSprintWithContext moves issues to the sprint with the given sprint ID.
+// Issues can only be moved to open or active sprints.
+// The maximum number of issues that can be moved in one operation is 50.
+//
+// Jira API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/sprint-moveIssuesToSprint
+func (s *SprintService) MoveIssuesToSprintWithContext(ctx context.Context, sprintID int, issueIDs []string) (*Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/agile/1.0/sprint/%d/issue", sprintID)
+
+	payload := IssuesWrapper{Issues: issueIDs}
+
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, payload)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		err = NewJiraError(resp, err)
+	}
+	return resp, err
+}
+
+// MoveIssuesToSprint wraps MoveIssuesToSprintWithContext using the background context.
+func (s *SprintService) MoveIssuesToSprint(sprintID int, issueIDs []string) (*Response, error) {
+	return s.MoveIssuesToSprintWithContext(context.Background(), sprintID, issueIDs)
+}
+
+// GetIssuesForSprintWithContext returns all issues in the sprint with the given sprint ID.
+// This only includes issues that the user has permission to view.
+// By default, the returned issues are ordered by rank.
+//
+// Jira API Docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/sprint-getIssuesForSprint
+func (s *SprintService) GetIssuesForSprintWithContext(ctx context.Context, sprintID int) ([]Issue, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/agile/1.0/sprint/%d/issue", sprintID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	result := new(IssuesInSprintResult)
+	resp, err := s.client.Do(req, result)
+	if err != nil {
+		err = NewJiraError(resp, err)
+	}
+
+	return result.Issues, resp, err
+}
+
+// GetIssuesForSprint wraps GetIssuesForSprintWithContext using the background context.
+func (s *SprintService) GetIssuesForSprint(sprintID int) ([]Issue, *Response, error) {
+	return s.GetIssuesForSprintWithContext(context.Background(), sprintID)
+}
+
+// GetIssueWithContext returns a full representation of the issue for the given issue key.
+// Jira will attempt to identify the issue by the issueIdOrKey path parameter.
+// This can be an issue id, or an issue key.
+// If the issue cannot be found via an exact match, Jira will also look for the issue in a case-insensitive way, or by looking to see if the issue was moved.
+//
+// The given options will be appended to the query string.
+//
+// Jira API docs: https://docs.atlassian.com/jira-software/REST/7.3.1/#agile/1.0/issue-getIssue
+//
+// TODO: create agile service for holding all agile apis' implementation
+func (s *SprintService) GetIssueWithContext(ctx context.Context, issueID string, options *GetQueryOptions) (*Issue, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/agile/1.0/issue/%s", issueID)
+
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if options != nil {
+		q, err := query.Values(options)
+		if err != nil {
+			return nil, nil, err
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	issue := new(Issue)
+	resp, err := s.client.Do(req, issue)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	return issue, resp, nil
+}
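Since MoveIssuesToSprint accepts at most 50 issues per call (the documented cap above), a larger backlog has to be moved in chunks. A helper sketch, with client.Sprint assumed as the service field:

func moveAllToSprint(client *jira.Client, sprintID int, issueIDs []string) error {
	const maxPerCall = 50 // documented API limit per operation
	for len(issueIDs) > 0 {
		n := len(issueIDs)
		if n > maxPerCall {
			n = maxPerCall
		}
		if _, err := client.Sprint.MoveIssuesToSprint(sprintID, issueIDs[:n]); err != nil {
			return err
		}
		issueIDs = issueIDs[n:]
	}
	return nil
}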
+
+// GetIssue wraps GetIssueWithContext using the background context.
+func (s *SprintService) GetIssue(issueID string, options *GetQueryOptions) (*Issue, *Response, error) {
+	return s.GetIssueWithContext(context.Background(), issueID, options)
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/status.go b/vendor/github.com/andygrunwald/go-jira/status.go
new file mode 100644
index 00000000000..a37039296c9
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/status.go
@@ -0,0 +1,47 @@
+package jira
+
+import "context"
+
+// StatusService handles statuses for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-group-Workflow-statuses
+type StatusService struct {
+	client *Client
+}
+
+// Status represents the current status of a Jira issue.
+// Typical statuses are "Open", "In Progress", "Closed", ...
+// Statuses can be user defined in every Jira instance.
+type Status struct {
+	Self           string         `json:"self" structs:"self"`
+	Description    string         `json:"description" structs:"description"`
+	IconURL        string         `json:"iconUrl" structs:"iconUrl"`
+	Name           string         `json:"name" structs:"name"`
+	ID             string         `json:"id" structs:"id"`
+	StatusCategory StatusCategory `json:"statusCategory" structs:"statusCategory"`
+}
+
+// GetAllStatusesWithContext returns a list of all statuses associated with workflows.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-status-get
+func (s *StatusService) GetAllStatusesWithContext(ctx context.Context) ([]Status, *Response, error) {
+	apiEndpoint := "rest/api/2/status"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	statusList := []Status{}
+	resp, err := s.client.Do(req, &statusList)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+
+	return statusList, resp, nil
+}
+
+// GetAllStatuses wraps GetAllStatusesWithContext using the background context.
+func (s *StatusService) GetAllStatuses() ([]Status, *Response, error) {
+	return s.GetAllStatusesWithContext(context.Background())
+}
diff --git a/vendor/github.com/andygrunwald/go-jira/statuscategory.go b/vendor/github.com/andygrunwald/go-jira/statuscategory.go
new file mode 100644
index 00000000000..bed5c566ca8
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/statuscategory.go
@@ -0,0 +1,51 @@
+package jira
+
+import "context"
+
+// StatusCategoryService handles status categories for the Jira instance / API.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-Statuscategory
+type StatusCategoryService struct {
+	client *Client
+}
+
+// StatusCategory represents the category a status belongs to.
+// Those categories can be user defined in every Jira instance.
+type StatusCategory struct { + Self string `json:"self" structs:"self"` + ID int `json:"id" structs:"id"` + Name string `json:"name" structs:"name"` + Key string `json:"key" structs:"key"` + ColorName string `json:"colorName" structs:"colorName"` +} + +// These constants are the keys of the default Jira status categories +const ( + StatusCategoryComplete = "done" + StatusCategoryInProgress = "indeterminate" + StatusCategoryToDo = "new" + StatusCategoryUndefined = "undefined" +) + +// GetListWithContext gets all status categories from Jira +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-statuscategory-get +func (s *StatusCategoryService) GetListWithContext(ctx context.Context) ([]StatusCategory, *Response, error) { + apiEndpoint := "rest/api/2/statuscategory" + req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil) + if err != nil { + return nil, nil, err + } + + statusCategoryList := []StatusCategory{} + resp, err := s.client.Do(req, &statusCategoryList) + if err != nil { + return nil, resp, NewJiraError(resp, err) + } + return statusCategoryList, resp, nil +} + +// GetList wraps GetListWithContext using the background context. +func (s *StatusCategoryService) GetList() ([]StatusCategory, *Response, error) { + return s.GetListWithContext(context.Background()) +} diff --git a/vendor/github.com/andygrunwald/go-jira/types.go b/vendor/github.com/andygrunwald/go-jira/types.go new file mode 100644 index 00000000000..b99fc1c3fe8 --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/types.go @@ -0,0 +1,9 @@ +package jira + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + p := new(bool) + *p = v + return p +} diff --git a/vendor/github.com/andygrunwald/go-jira/user.go b/vendor/github.com/andygrunwald/go-jira/user.go new file mode 100644 index 00000000000..3e64e42c2ae --- /dev/null +++ b/vendor/github.com/andygrunwald/go-jira/user.go @@ -0,0 +1,268 @@ +package jira + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" +) + +// UserService handles users for the Jira instance / API. +// +// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-group-Users +type UserService struct { + client *Client +} + +// User represents a Jira user. 
+type User struct {
+	Self            string     `json:"self,omitempty" structs:"self,omitempty"`
+	AccountID       string     `json:"accountId,omitempty" structs:"accountId,omitempty"`
+	AccountType     string     `json:"accountType,omitempty" structs:"accountType,omitempty"`
+	Name            string     `json:"name,omitempty" structs:"name,omitempty"`
+	Key             string     `json:"key,omitempty" structs:"key,omitempty"`
+	Password        string     `json:"-"`
+	EmailAddress    string     `json:"emailAddress,omitempty" structs:"emailAddress,omitempty"`
+	AvatarUrls      AvatarUrls `json:"avatarUrls,omitempty" structs:"avatarUrls,omitempty"`
+	DisplayName     string     `json:"displayName,omitempty" structs:"displayName,omitempty"`
+	Active          bool       `json:"active,omitempty" structs:"active,omitempty"`
+	TimeZone        string     `json:"timeZone,omitempty" structs:"timeZone,omitempty"`
+	Locale          string     `json:"locale,omitempty" structs:"locale,omitempty"`
+	ApplicationKeys []string   `json:"applicationKeys,omitempty" structs:"applicationKeys,omitempty"`
+}
+
+// UserGroup represents the group list
+type UserGroup struct {
+	Self string `json:"self,omitempty" structs:"self,omitempty"`
+	Name string `json:"name,omitempty" structs:"name,omitempty"`
+}
+
+type userSearchParam struct {
+	name  string
+	value string
+}
+
+type userSearch []userSearchParam
+
+type userSearchF func(userSearch) userSearch
+
+// GetWithContext gets user info from Jira using its account ID
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-user-get
+func (s *UserService) GetWithContext(ctx context.Context, accountId string) (*User, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/user?accountId=%s", accountId)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	user := new(User)
+	resp, err := s.client.Do(req, user)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return user, resp, nil
+}
+
+// Get wraps GetWithContext using the background context.
+func (s *UserService) Get(accountId string) (*User, *Response, error) {
+	return s.GetWithContext(context.Background(), accountId)
+}
+
+// GetByAccountIDWithContext gets user info from Jira.
+// Searching by any parameter other than accountId is deprecated,
+// but this method is kept for backwards compatibility.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/user-getUser
+func (s *UserService) GetByAccountIDWithContext(ctx context.Context, accountID string) (*User, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/user?accountId=%s", accountID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	user := new(User)
+	resp, err := s.client.Do(req, user)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return user, resp, nil
+}
+
+// GetByAccountID wraps GetByAccountIDWithContext using the background context.
+func (s *UserService) GetByAccountID(accountID string) (*User, *Response, error) {
+	return s.GetByAccountIDWithContext(context.Background(), accountID)
+}
+
+// CreateWithContext creates a user in Jira.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/user-createUser
+func (s *UserService) CreateWithContext(ctx context.Context, user *User) (*User, *Response, error) {
+	apiEndpoint := "/rest/api/2/user"
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, user)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	responseUser := new(User)
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		e := fmt.Errorf("could not read the returned data")
+		return nil, resp, NewJiraError(resp, e)
+	}
+	err = json.Unmarshal(data, responseUser)
+	if err != nil {
+		e := fmt.Errorf("could not unmarshal the data into the struct")
+		return nil, resp, NewJiraError(resp, e)
+	}
+	return responseUser, resp, nil
+}
+
+// Create wraps CreateWithContext using the background context.
+func (s *UserService) Create(user *User) (*User, *Response, error) {
+	return s.CreateWithContext(context.Background(), user)
+}
+
+// DeleteWithContext deletes a user from Jira.
+// Returns http.StatusNoContent on success.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-user-delete
+func (s *UserService) DeleteWithContext(ctx context.Context, accountId string) (*Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/user?accountId=%s", accountId)
+	req, err := s.client.NewRequestWithContext(ctx, "DELETE", apiEndpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return resp, NewJiraError(resp, err)
+	}
+	return resp, nil
+}
+
+// Delete wraps DeleteWithContext using the background context.
+func (s *UserService) Delete(accountId string) (*Response, error) {
+	return s.DeleteWithContext(context.Background(), accountId)
+}
+
+// GetGroupsWithContext returns the groups which the user belongs to
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-user-groups-get
+func (s *UserService) GetGroupsWithContext(ctx context.Context, accountId string) (*[]UserGroup, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/user/groups?accountId=%s", accountId)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	userGroups := new([]UserGroup)
+	resp, err := s.client.Do(req, userGroups)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return userGroups, resp, nil
+}
+
+// GetGroups wraps GetGroupsWithContext using the background context.
+func (s *UserService) GetGroups(accountId string) (*[]UserGroup, *Response, error) {
+	return s.GetGroupsWithContext(context.Background(), accountId)
+}
+
+// GetSelfWithContext returns information about the currently logged-in user
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-myself-get
+func (s *UserService) GetSelfWithContext(ctx context.Context) (*User, *Response, error) {
+	const apiEndpoint = "rest/api/2/myself"
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	var user User
+	resp, err := s.client.Do(req, &user)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return &user, resp, nil
+}
+
+// GetSelf wraps GetSelfWithContext using the background context.
+func (s *UserService) GetSelf() (*User, *Response, error) {
+	return s.GetSelfWithContext(context.Background())
+}
+
+// WithMaxResults sets the max results to return
+func WithMaxResults(maxResults int) userSearchF {
+	return func(s userSearch) userSearch {
+		s = append(s, userSearchParam{name: "maxResults", value: fmt.Sprintf("%d", maxResults)})
+		return s
+	}
+}
+
+// WithStartAt sets the starting index of the page
+func WithStartAt(startAt int) userSearchF {
+	return func(s userSearch) userSearch {
+		s = append(s, userSearchParam{name: "startAt", value: fmt.Sprintf("%d", startAt)})
+		return s
+	}
+}
+
+// WithActive sets the active users lookup
+func WithActive(active bool) userSearchF {
+	return func(s userSearch) userSearch {
+		s = append(s, userSearchParam{name: "includeActive", value: fmt.Sprintf("%t", active)})
+		return s
+	}
+}
+
+// WithInactive sets the inactive users lookup
+func WithInactive(inactive bool) userSearchF {
+	return func(s userSearch) userSearch {
+		s = append(s, userSearchParam{name: "includeInactive", value: fmt.Sprintf("%t", inactive)})
+		return s
+	}
+}
+
+// FindWithContext searches for user info from Jira.
+// It can find users by email or display name using the query parameter.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/#api-rest-api-2-user-search-get
+func (s *UserService) FindWithContext(ctx context.Context, property string, tweaks ...userSearchF) ([]User, *Response, error) {
+	search := []userSearchParam{
+		{
+			name:  "query",
+			value: property,
+		},
+	}
+	for _, f := range tweaks {
+		search = f(search)
+	}
+
+	var queryString = ""
+	for _, param := range search {
+		queryString += param.name + "=" + param.value + "&"
+	}
+
+	apiEndpoint := fmt.Sprintf("/rest/api/2/user/search?%s", queryString[:len(queryString)-1])
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	users := []User{}
+	resp, err := s.client.Do(req, &users)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return users, resp, nil
+}
+
+// Find wraps FindWithContext using the background context.
+func (s *UserService) Find(property string, tweaks ...userSearchF) ([]User, *Response, error) {
+	return s.FindWithContext(context.Background(), property, tweaks...)
+}
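The functional options above compose into the query string; a sketch of a paged, active-only user search, again with the assumed client wiring (client.User):

users, _, err := client.User.Find("jane",
	jira.WithMaxResults(10),
	jira.WithStartAt(0),
	jira.WithActive(true),
)
if err != nil {
	log.Fatal(err)
}
for _, u := range users {
	fmt.Println(u.AccountID, u.DisplayName)
}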
diff --git a/vendor/github.com/andygrunwald/go-jira/version.go b/vendor/github.com/andygrunwald/go-jira/version.go
new file mode 100644
index 00000000000..6ab9fa70554
--- /dev/null
+++ b/vendor/github.com/andygrunwald/go-jira/version.go
@@ -0,0 +1,113 @@
+package jira
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+)
+
+// VersionService handles Versions for the Jira instance / API.
+//
+// Jira API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/version
+type VersionService struct {
+	client *Client
+}
+
+// Version represents a single release version of a project
+type Version struct {
+	Self            string `json:"self,omitempty" structs:"self,omitempty"`
+	ID              string `json:"id,omitempty" structs:"id,omitempty"`
+	Name            string `json:"name,omitempty" structs:"name,omitempty"`
+	Description     string `json:"description,omitempty" structs:"description,omitempty"`
+	Archived        *bool  `json:"archived,omitempty" structs:"archived,omitempty"`
+	Released        *bool  `json:"released,omitempty" structs:"released,omitempty"`
+	ReleaseDate     string `json:"releaseDate,omitempty" structs:"releaseDate,omitempty"`
+	UserReleaseDate string `json:"userReleaseDate,omitempty" structs:"userReleaseDate,omitempty"`
+	ProjectID       int    `json:"projectId,omitempty" structs:"projectId,omitempty"` // Unlike other IDs, this is returned as a number
+	StartDate       string `json:"startDate,omitempty" structs:"startDate,omitempty"`
+}
+
+// GetWithContext gets version info from Jira
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-version-id-get
+func (s *VersionService) GetWithContext(ctx context.Context, versionID int) (*Version, *Response, error) {
+	apiEndpoint := fmt.Sprintf("/rest/api/2/version/%v", versionID)
+	req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	version := new(Version)
+	resp, err := s.client.Do(req, version)
+	if err != nil {
+		return nil, resp, NewJiraError(resp, err)
+	}
+	return version, resp, nil
+}
+
+// Get wraps GetWithContext using the background context.
+func (s *VersionService) Get(versionID int) (*Version, *Response, error) {
+	return s.GetWithContext(context.Background(), versionID)
+}
+
+// CreateWithContext creates a version in Jira.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-version-post
+func (s *VersionService) CreateWithContext(ctx context.Context, version *Version) (*Version, *Response, error) {
+	apiEndpoint := "/rest/api/2/version"
+	req, err := s.client.NewRequestWithContext(ctx, "POST", apiEndpoint, version)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	responseVersion := new(Version)
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		e := fmt.Errorf("could not read the returned data")
+		return nil, resp, NewJiraError(resp, e)
+	}
+	err = json.Unmarshal(data, responseVersion)
+	if err != nil {
+		e := fmt.Errorf("could not unmarshal the data into the struct")
+		return nil, resp, NewJiraError(resp, e)
+	}
+	return responseVersion, resp, nil
+}
+
+// Create wraps CreateWithContext using the background context.
+func (s *VersionService) Create(version *Version) (*Version, *Response, error) {
+	return s.CreateWithContext(context.Background(), version)
+}
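Create pairs naturally with the Bool helper from types.go, since Version models its optional flags as *bool. A creation sketch with hypothetical IDs and the assumed client wiring (client.Version):

v := &jira.Version{
	Name:      "v1.2.0", // hypothetical version name
	ProjectID: 10100,    // hypothetical numeric project ID
	Released:  jira.Bool(false),
	Archived:  jira.Bool(false),
}
created, _, err := client.Version.Create(v)
if err != nil {
	log.Fatal(err)
}
fmt.Println("created version", created.ID)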
+
+// UpdateWithContext updates a version from a JSON representation.
+//
+// Jira API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-version-id-put
+func (s *VersionService) UpdateWithContext(ctx context.Context, version *Version) (*Version, *Response, error) {
+	apiEndpoint := fmt.Sprintf("rest/api/2/version/%v", version.ID)
+	req, err := s.client.NewRequestWithContext(ctx, "PUT", apiEndpoint, version)
+	if err != nil {
+		return nil, nil, err
+	}
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		jerr := NewJiraError(resp, err)
+		return nil, resp, jerr
+	}
+
+	// This is just to follow the rest of the API's convention of returning a version.
+	// Returning the same pointer here is pointless, so we return a copy instead.
+	ret := *version
+	return &ret, resp, nil
+}
+
+// Update wraps UpdateWithContext using the background context.
+func (s *VersionService) Update(version *Version) (*Version, *Response, error) {
+	return s.UpdateWithContext(context.Background(), version)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 00000000000..1c496742903 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. 
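+//
+// A minimal usage sketch (an illustration, not part of the upstream file):
+//
+//    a, err := arn.Parse("arn:aws:iam::123456789012:user/David")
+//    if err == nil {
+//        fmt.Println(a.Service, a.AccountID, a.Resource) // iam 123456789012 user/David
+//    }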
+package arn
+
+import (
+	"errors"
+	"strings"
+)
+
+const (
+	arnDelimiter = ":"
+	arnSections  = 6
+	arnPrefix    = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+
+	// errors
+	invalidPrefix   = "arn: invalid prefix"
+	invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+	// The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+	// other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+	// (Beijing) region is "aws-cn".
+	Partition string
+
+	// The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+	// namespaces, see
+	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+	Service string
+
+	// The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+	// component might be omitted.
+	Region string
+
+	// The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+	// ARNs for some resources don't require an account number, so this component might be omitted.
+	AccountID string
+
+	// The content of this part of the ARN varies by service. It often includes an indicator of the type of resource,
+	// for example an IAM user or Amazon RDS database, followed by a slash (/) or a colon (:), followed by the
+	// resource name itself. Some services allow paths for resource names, as described in
+	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+	Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+	if !strings.HasPrefix(arn, arnPrefix) {
+		return ARN{}, errors.New(invalidPrefix)
+	}
+	sections := strings.SplitN(arn, arnDelimiter, arnSections)
+	if len(sections) != arnSections {
+		return ARN{}, errors.New(invalidSections)
+	}
+	return ARN{
+		Partition: sections[sectionPartition],
+		Service:   sections[sectionService],
+		Region:    sections[sectionRegion],
+		AccountID: sections[sectionAccountID],
+		Resource:  sections[sectionResource],
+	}, nil
+}
+
+// IsARN returns whether the given string is an ARN by looking for
+// whether the string starts with "arn:" and contains the correct number
+// of sections delimited by colons (:).
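+//
+// For example (an illustrative sketch, not part of the upstream file):
+//
+//    IsARN("arn:aws:s3:::my_corporate_bucket") // true
+//    IsARN("not-an-arn")                       // false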
+func IsARN(arn string) bool {
+	return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+	return arnPrefix +
+		arn.Partition + arnDelimiter +
+		arn.Service + arnDelimiter +
+		arn.Region + arnDelimiter +
+		arn.AccountID + arnDelimiter +
+		arn.Resource
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 00000000000..99849c0e19c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,164 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//    output, err := s3manager.Upload(svc, input, opts)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            // Get error details
+//            log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//            // Prints out full error message, including original error if there was one.
+//            log.Println("Error:", awsErr.Error())
+//
+//            // Get original error
+//            if origErr := awsErr.OrigErr(); origErr != nil {
+//                // operate on original error.
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
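+//
+// For example (an illustrative sketch, not part of the upstream file):
+//
+//    err := awserr.NewBatchError("BatchedErrors", "multiple failures", []error{err1, err2})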
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as on a connection error.
+//
+// Example:
+//
+//    output, err := s3manager.Upload(svc, input, opts)
+//    if err != nil {
+//        if reqerr, ok := err.(RequestFailure); ok {
+//            log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//        } else {
+//            log.Println("Error:", err.Error())
+//        }
+//    }
+//
+// Combined with awserr.Error:
+//
+//    output, err := s3manager.Upload(svc, input, opts)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            // Generic AWS Error with Code, Message, and original error (if any)
+//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//            if reqErr, ok := err.(awserr.RequestFailure); ok {
+//                // A service error occurred
+//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type RequestFailure interface {
+	Error
+
+	// The status code of the HTTP response.
+	StatusCode() int
+
+	// The request ID returned by the service for a request failure. This will
+	// be empty if no request ID is available, such as when the request failed
+	// due to a connection error.
+	RequestID() string
+}
+
+// NewRequestFailure returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+	return newRequestError(err, statusCode, reqID)
+}
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+	awsError
+	Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+	return &unmarshalError{
+		awsError: New("UnmarshalError", msg, err),
+		bytes:    bytes,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 00000000000..9cf7eaf4007
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,221 @@
+package awserr
+
+import (
+	"encoding/hex"
+	"fmt"
+)
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
+func SprintError(code, message, extra string, origErr error) string {
+	msg := fmt.Sprintf("%s: %s", code, message)
+	if extra != "" {
+		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+	}
+	if origErr != nil {
+		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+	}
+	return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original error this error is based off of. Allows building
+	// chained errors.
+	errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs contains the error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
+		code:    code,
+		message: message,
+		errs:    origErrs,
+	}
+
+	return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	size := len(b.errs)
+	if size > 0 {
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occurred", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+	bytes      []byte
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+type unmarshalError struct {
+	awsError
+	bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+	extra := hex.Dump(e.bytes)
+	return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+	return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+	return e.bytes
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// How do we want to handle the array size being zero?
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// We check the next index to see if it is within the slice. If it
+			// is, we append a newline; unit tests could break with an extra
+			// trailing '\n'.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 00000000000..1a3d106d5c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+	"time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				if _, ok := src.Interface().(*time.Time); !ok {
+					dst.Set(reflect.New(e))
+				} else {
+					tempValue := reflect.New(e)
+					tempValue.Elem().Set(src.Elem())
+					// Sets time.Time's unexported values
+					dst.Set(tempValue)
+				}
+			}
+			if src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If it's not assignable, the value would
+		// need to be converted and the impact of that may be unexpected, or is
+		// not compatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 00000000000..142a7a01c52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil, and of the same type they are equal
+		// If they are of different types they are not equal
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 00000000000..a4eb6a7f43a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,221 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
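+//
+// For example (an illustrative sketch, not part of the upstream file; path
+// expressions are evaluated with github.com/jmespath/go-jmespath):
+//
+//    names, err := awsutil.ValuesAtPath(listBucketsOutput, "Buckets[].Name")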
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 00000000000..710eb432f85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 00000000000..645df2450fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
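+//
+// For example (an illustrative sketch; the exact output layout is not part of
+// any API contract):
+//
+//    s := awsutil.StringValue(&struct{ Name *string }{Name: aws.String("demo")})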
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 00000000000..74f35ccf0cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,93 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config        *aws.Config
+	Handlers      request.Handlers
+	PartitionID   string
+	Endpoint      string
+	SigningRegion string
+	SigningName   string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata
+	// the service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
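+//
+// Service client packages invoke this from their own constructors; an
+// illustrative sketch (not part of the upstream file):
+//
+//    c := client.New(*cfg.Config, metadata.ClientInfo{ServiceName: "s3"}, cfg.Handlers)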
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = DefaultRetryerMaxNumRetries
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 00000000000..9f6af19dd45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,177 @@
+package client
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, you can implement the
+// request.Retryer interface.
+//
+type DefaultRetryer struct {
+	// NumMaxRetries is the maximum number of retries that will be performed.
+	// By default, this is zero.
+	NumMaxRetries int
+
+	// MinRetryDelay is the minimum retry delay after which retry will be performed.
+	// If not set, the value is 0ns.
+	MinRetryDelay time.Duration
+
+	// MinThrottleDelay is the minimum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MinThrottleDelay time.Duration
+
+	// MaxRetryDelay is the maximum retry delay between attempts.
+	// If not set, the value is 0ns.
+	MaxRetryDelay time.Duration
+
+	// MaxThrottleDelay is the maximum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MaxThrottleDelay time.Duration
+}
+
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the maximum number of retries the service will make on
+// an individual API request.
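+//
+// For context, an illustrative way to plug a customized DefaultRetryer into
+// a config (a sketch; request.WithRetryer is the helper referenced in
+// aws/config.go):
+//
+//    cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
+//        NumMaxRetries: 5,
+//        MinRetryDelay: 50 * time.Millisecond,
+//    })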
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+	if d.NumMaxRetries == 0 {
+		return false
+	}
+
+	// If one of the other handlers already set the retry state
+	// we don't want to override it based on the service's state
+	if r.Retryable != nil {
+		return *r.Retryable
+	}
+	return r.IsErrorRetryable() || r.IsErrorThrottle()
+}
+
+// This will look in the Retry-After header, RFC 7231, for how long
+// it will wait before attempting another request
+func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
+	if !canUseRetryAfterHeader(r) {
+		return 0, false
+	}
+
+	delayStr := r.HTTPResponse.Header.Get("Retry-After")
+	if len(delayStr) == 0 {
+		return 0, false
+	}
+
+	delay, err := strconv.Atoi(delayStr)
+	if err != nil {
+		return 0, false
+	}
+
+	return time.Duration(delay) * time.Second, true
+}
+
+// Will look at the status code to see if the retry header pertains to
+// the status code.
+func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 00000000000..1d774cfa251 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,202 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
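+//
+// A usage sketch (an illustration, not part of the upstream file): it can be
+// swapped in for the default request logger on a client's handler list:
+//
+//    svc.Handlers.Send.PushFrontNamed(client.LogHTTPRequestHeaderHandler)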
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+	Name: "awssdk.client.LogResponseHeader",
+	Fn:   logResponseHeader,
+}
+
+func logResponseHeader(r *request.Request) {
+	if r.Config.Logger == nil {
+		return
+	}
+
+	b, err := httputil.DumpResponse(r.HTTPResponse, false)
+	if err != nil {
+		r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, err))
+		return
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 00000000000..0c48f72e08e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,14 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName   string
+	ServiceID     string
+	APIVersion    string
+	PartitionID   string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
new file mode 100644
index 00000000000..881d575f010
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will make on
+// an individual API request; for NoOpRetryer this will always be zero.
+func (d NoOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 00000000000..39fa6d5fe74
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,605 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+//   // Create Session with MaxRetries configuration to be shared by multiple
+//   // service clients.
+//   sess := session.Must(session.NewSession(&aws.Config{
+//       MaxRetries: aws.Int(3),
+//   }))
+//
+//   // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `nil` or the value to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. 
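+	//
+	// An illustrative sketch (not part of the upstream file; it assumes
+	// WithDisableComputeChecksums is one of Config's generated With* setters):
+	//
+	//    cfg := aws.NewConfig().WithDisableComputeChecksums(true)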
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+	// will use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// Note: This configuration option is specific to the Amazon S3 service.
+	//
+	// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	// for Amazon S3: Virtual Hosting of Buckets.
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate to be used with an S3 client
+	// that has accelerate enabled. If the bucket is not enabled for accelerate
+	// an error will be returned. The bucket name must also be DNS compatible
+	// to work with accelerate.
+	S3UseAccelerate *bool
+
+	// The S3DisableContentMD5Validation config option is temporarily disabled
+	// for S3 GetObject API calls, #1837.
+	//
+	// Set this to `true` to prevent the S3 service client from automatically
+	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+	// will also disable the SDK from performing object ContentMD5 validation
+	// on GetObject API calls.
+	S3DisableContentMD5Validation *bool
+
+	// Set this to `true` to have the S3 service client use the region specified
+	// in the ARN, when an ARN is provided as an argument to a bucket parameter.
+	S3UseARNRegion *bool
+
+	// Set this to `true` to enable the SDK to unmarshal API response header maps
+	// to normalized lower case map keys.
+	//
+	// For example, S3's X-Amz-Meta prefixed headers will be unmarshaled to lower
+	// case keys in the Metadata member's map. The value of the header in the map
+	// is unaffected.
+	LowerCaseHeaderMaps *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to session.NewSession() in order to disable
+	// the EC2Metadata client from overriding the timeout for the default
+	// credentials chain.
+ // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDisableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requests. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. 
+// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. 
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+	c.S3UseAccelerate = &enable
+	return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+	c.S3DisableContentMD5Validation = &enable
+	return c
+}
+
+// WithS3UseARNRegion sets a config S3UseARNRegion value,
+// returning a Config pointer for chaining.
+func (c *Config) WithS3UseARNRegion(enable bool) *Config {
+	c.S3UseARNRegion = &enable
+	return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+	c.UseDualStack = &enable
+	return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+	c.EC2MetadataDisableTimeoutOverride = &enable
+	return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+	c.SleepDelay = fn
+	return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+	c.EnableEndpointDiscovery = &t
+	return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+	c.DisableEndpointHostPrefix = &t
+	return c
+}
+
+// WithSTSRegionalEndpoint will set whether or not to use the regional endpoint flag
+// when resolving the endpoint for a service.
+func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
+	c.STSRegionalEndpoint = sre
+	return c
+}
+
+// WithS3UsEast1RegionalEndpoint will set whether or not to use the regional endpoint flag
+// when resolving the endpoint for a service.
+func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
+	c.S3UsEast1RegionalEndpoint = sre
+	return c
+}
+
+// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
+// returning a Config pointer for chaining.
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
+	c.LowerCaseHeaderMaps = &t
+	return c
+}
+
+// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
+	c.DisableRestProtocolURICleaning = &t
+	return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
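+//
+// Editor's sketch (hypothetical values, not part of the SDK source); on
+// overlapping fields, later configs win:
+//
+//	dst := aws.NewConfig().WithRegion("us-east-1")
+//	dst.MergeIn(aws.NewConfig().WithRegion("us-west-2"), aws.NewConfig().WithMaxRetries(3))
+//	// dst.Region is now "us-west-2"; dst.MaxRetries is 3.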
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } + + if other.LowerCaseHeaderMaps != nil { + dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
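+//
+// Editor's sketch (hypothetical values, not part of the SDK source); Copy is
+// non-destructive, so a base config can be reused:
+//
+//	base := aws.NewConfig().WithRegion("us-west-2")
+//	tuned := base.Copy(aws.NewConfig().WithMaxRetries(5))
+//	// base is unchanged; tuned has both Region and MaxRetries set.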
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 00000000000..2866f9a7fb9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 00000000000..3718b26e101
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 00000000000..2f9446333a6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.7
+
+package aws
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
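+//
+// Editor's sketch (illustrative only): useful when an API requires a Context
+// but none is available, e.g.
+//
+//	_ = aws.SleepWithContext(aws.BackgroundContext(), time.Second)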
+func BackgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 00000000000..9c29f29af17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 00000000000..304fd156120
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+	"time"
+)
+
+// SleepWithContext will wait until the timer duration expires, or the context
+// is canceled, whichever happens first. If the context is canceled, the
+// Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 00000000000..4e076c1837a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,918 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+	if v != nil {
+		return *v
+	}
+	return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+	dst := make([]*string, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+	dst := make([]string, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+	dst := make(map[string]*string)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+	dst := make(map[string]string)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
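+//
+// Editor's note (illustrative only): each pointer helper pairs with a *Value
+// accessor that safely dereferences nil:
+//
+//	enabled := aws.Bool(true) // *bool
+//	aws.BoolValue(enabled)    // true
+//	aws.BoolValue(nil)        // false: nil yields the zero value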
+func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+	dst := make([]*int8, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+	dst := make([]int8, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+	dst := make(map[string]*int8)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+	dst := make(map[string]int8)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+	return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. +func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// (Note: the value is divided by 1000 before conversion, so in practice
+// callers pass a value in milliseconds.)
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
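+//
+// Editor's example (illustrative only): for t := time.Unix(1, 500000000),
+// i.e. 1.5s after the epoch, TimeUnixMilli(t) returns 1500.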
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 00000000000..d95a5eb5408 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,232 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
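+//
+// Editor's note: in the SDK's default handler chain this runs during the Send
+// phase, registered along the lines of
+//
+//	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)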
+var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. 
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 00000000000..7d50b1557cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 00000000000..ab69c7a6f38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. 
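+//
+// Editor's note: the resulting User-Agent value looks roughly like
+// "aws-sdk-go/1.38.0 (go1.16; linux; amd64)" (version numbers illustrative).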
+var SDKVersionUserAgentHandler = request.NamedHandler{
+	Name: "core.SDKVersionUserAgentHandler",
+	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+		runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+	Name: "core.AddHostExecEnvUserAgentHander",
+	Fn: func(r *request.Request) {
+		v := os.Getenv(execEnvVar)
+		if len(v) == 0 {
+			return
+		}
+
+		request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 00000000000..3ad1e798df8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true.
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together,
+// which will pick the first available provider using the priority order of
+// the Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns a valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of a ChainProvider used with an EnvProvider and EC2RoleProvider.
+// In this example, EnvProvider will first check if any credentials are
+// available via the environment variables. If there are none, ChainProvider
+// will check the next Provider in the list, EC2RoleProvider in this case. If
+// EC2RoleProvider does not return any credentials, ChainProvider will return
+// the error ErrNoValidProvidersFoundInChain.
+//
+//	creds := credentials.NewChainCredentials(
+//		[]credentials.Provider{
+//			&credentials.EnvProvider{},
+//			&ec2rolecreds.EC2RoleProvider{
+//				Client: ec2metadata.New(sess),
+//			},
+//		})
+//
+//	// Usage of ChainCredentials with aws.Config
+//	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: creds,
+//	})))
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned a value without error.
+//
+// If a provider is found, it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
new file mode 100644
index 00000000000..5852b264870
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -0,0 +1,22 @@
+// +build !go1.7
+
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 00000000000..388b2154182
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
new file mode 100644
index 00000000000..8152a864add
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
@@ -0,0 +1,39 @@
+// +build !go1.9
+
+package credentials
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// This type, aws.Context, and context.Context are equivalent.
+// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 00000000000..4356edb3d5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 00000000000..a880a3de8fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,383 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. 
+// +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "fmt" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. 
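To make the Provider contract above concrete, here is a minimal sketch of a custom provider; the file path and the "id:secret" file format are invented purely for illustration:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// fileProvider reads "ACCESS_KEY_ID:SECRET" from a single file. It satisfies
// credentials.Provider; the Credentials wrapper supplies the locking.
type fileProvider struct {
	Path      string
	retrieved bool
}

func (p *fileProvider) Retrieve() (credentials.Value, error) {
	p.retrieved = false
	b, err := ioutil.ReadFile(p.Path)
	if err != nil {
		return credentials.Value{ProviderName: "FileProvider"}, err
	}
	parts := strings.SplitN(strings.TrimSpace(string(b)), ":", 2)
	if len(parts) != 2 {
		return credentials.Value{ProviderName: "FileProvider"},
			fmt.Errorf("malformed credentials file %s", p.Path)
	}
	p.retrieved = true
	return credentials.Value{
		AccessKeyID:     parts[0],
		SecretAccessKey: parts[1],
		ProviderName:    "FileProvider",
	}, nil
}

// IsExpired reports true until the first successful Retrieve; after that the
// file's contents are treated as never expiring.
func (p *fileProvider) IsExpired() bool { return !p.retrieved }

func main() {
	creds := credentials.NewCredentials(&fileProvider{Path: "/tmp/creds.txt"})
	v, err := creds.Get()
	fmt.Println(v.ProviderName, err)
}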
+func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + // Passed in expirations should have the monotonic clock values stripped. + // This ensures time comparisons will be based on wall-time. + e.expiration = expiration.Round(0) + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sf singleflight.Group + + m sync.RWMutex + creds Value + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + // Check if credentials are cached, and not expired. 
+ select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true, of the credentials were not expired. ok will + // be false and have no value if the credentials are expired. + if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { + c.m.Lock() + defer c.m.Unlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil + } + + var creds Value + var err error + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds = creds + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.creds = Value{} +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpiredLocked(c.creds) +} + +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed the credentials are expired and credentials value are not empty. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} + +// isExpiredLocked helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpiredLocked(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
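A short sketch of the context-aware retrieval path described above, using the environment provider so it runs anywhere; the timeout value is arbitrary:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewEnvCredentials()

	// GetWithContext returns early with a "RequestCanceled" error if ctx
	// is done before the provider finishes retrieving.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	v, err := creds.GetWithContext(ctx)
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("retrieved via:", v.ProviderName)

	// Expire forces the next Get/GetWithContext to hit the provider again.
	creds.Expire()
}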
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", + c.creds.ProviderName), + nil) + } + if c.creds == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 00000000000..92af5b7250a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,188 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. 
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	return m.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	credsList, err := requestCredList(ctx, m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. An
+// error is returned if there are no credentials, or if the request fails.
+func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New(request.ErrCodeSerialization,
+			"failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
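A sketch of the custom-client wiring the EC2RoleProvider doc comment describes; the HTTP timeout and expiry window values here are arbitrary examples:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Short HTTP timeout for the metadata service; refresh role
	// credentials five minutes before they actually expire.
	client := ec2metadata.New(sess, &aws.Config{
		HTTPClient: &http.Client{Timeout: 10 * time.Second},
	})
	creds := ec2rolecreds.NewCredentialsWithClient(client,
		func(p *ec2rolecreds.EC2RoleProvider) {
			p.ExpiryWindow = 5 * time.Minute
		})

	v, err := creds.Get() // only succeeds on an EC2 instance with a role
	fmt.Println(v.ProviderName, err)
}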
+func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 00000000000..785f30d8e6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,210 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. 
+ // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
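A sketch of consuming such an endpoint; the URL and token are placeholders, and the config and handlers are assumed to come from the SDK's aws/defaults package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// http://127.0.0.1:8081/creds is a hypothetical local endpoint that
	// serves the JSON documents described in the package comment above.
	creds := endpointcreds.NewCredentialsClient(*cfg, handlers,
		"http://127.0.0.1:8081/creds",
		func(p *endpointcreds.Provider) {
			p.AuthorizationToken = "example-token" // sent as the Authorization header
		})

	v, err := creds.Get()
	fmt.Println(v.ProviderName, err)
}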
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 00000000000..54c5cf7333f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. 
+// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 00000000000..7fc91d9d204 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 00000000000..e6248360029 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,426 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. 
+ svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. + + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. 
+ ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. 
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. +func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
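To make the expected process output concrete, here is a toy credential_process helper (all values are placeholders) emitting the JSON shape that Retrieve validates: Version must be 1, AccessKeyId and SecretAccessKey must be non-empty, and omitting Expiration would make the credentials static:

package main

import (
	"encoding/json"
	"os"
	"time"
)

func main() {
	// Matches credentialProcessResponse: Expiration is an optional RFC3339 time.
	out := map[string]interface{}{
		"Version":         1,
		"AccessKeyId":     "AKIDEXAMPLE",
		"SecretAccessKey": "secretEXAMPLE",
		"SessionToken":    "tokenEXAMPLE",
		"Expiration":      time.Now().Add(30 * time.Minute).UTC().Format(time.RFC3339),
	}
	json.NewEncoder(os.Stdout).Encode(out)
}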
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 00000000000..22b5c5d9f32 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.OpenFile(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) + } + + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.String("aws_session_token") + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil + } + + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. + // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go new file mode 100644 index 00000000000..18c940ab3c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go @@ -0,0 +1,60 @@ +// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token. +// +// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider +// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by +// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in +// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned. 
+// +// Loading AWS SSO credentials with the AWS shared configuration file +// +// You can use configure AWS SSO credentials from the AWS shared configuration file by +// providing the specifying the required keys in the profile: +// +// sso_account_id +// sso_region +// sso_role_name +// sso_start_url +// +// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target +// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be +// provided, or an error will be returned. +// +// [profile devsso] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_role_name = SSOReadOnlyRole +// sso_region = us-east-1 +// sso_account_id = 123456789012 +// +// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to +// retrieve credentials. For example: +// +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// Profile: "devsso", +// }) +// if err != nil { +// return err +// } +// +// Programmatically loading AWS SSO credentials directly +// +// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information +// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache. +// +// svc := sso.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region +// }) +// +// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") +// +// credentials, err := provider.Get() +// if err != nil { +// return err +// } +// +// Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go new file mode 100644 index 00000000000..ceca7dceecb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go @@ -0,0 +1,9 @@ +// +build !windows + +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go new file mode 100644 index 00000000000..eb48f61e5bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go @@ -0,0 +1,7 @@ +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("USERPROFILE") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go new file mode 100644 index 00000000000..6eda2a5557f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -0,0 +1,180 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + 
"github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. + StartURL string +} + +// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) +} + +// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + p := &Provider{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. 
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + tokenFile, err := loadTokenFile(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + + output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: &tokenFile.AccessToken, + AccountId: &p.AccountID, + RoleName: &p.RoleName, + }) + if err != nil { + return credentials.Value{}, err + } + + expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() + p.SetExpiration(expireTime, 0) + + return credentials.Value{ + AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), + ProviderName: ProviderName, + }, nil +} + +func getCacheFileName(url string) (string, error) { + hash := sha1.New() + _, err := hash.Write([]byte(url)) + if err != nil { + return "", err + } + return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil +} + +type rfc3339 time.Time + +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + + if err := json.Unmarshal(bytes, &value); err != nil { + return err + } + + parse, err := time.Parse(time.RFC3339, value) + if err != nil { + return fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + *r = rfc3339(parse) + + return nil +} + +type token struct { + AccessToken string `json:"accessToken"` + ExpiresAt rfc3339 `json:"expiresAt"` + Region string `json:"region,omitempty"` + StartURL string `json:"startUrl,omitempty"` +} + +func (t token) Expired() bool { + return nowTime().Round(0).After(time.Time(t.ExpiresAt)) +} + +func loadTokenFile(startURL string) (t token, err error) { + key, err := getCacheFileName(startURL) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if len(t.AccessToken) == 0 { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + if t.Expired() { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + return t, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 00000000000..cbba1e3d560 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. 
Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provider. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields. +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expire. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 00000000000..260a37cbbab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,367 @@ +/* +Package stscreds provides credential Providers for retrieving STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDK's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to make the STS Assume Role API call. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with an MFA token you can either specify an MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short-lived operations that will not need to be refreshed, and when you do +not need direct control over how the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for a new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfying the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ +package stscreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" + "github.com/aws/aws-sdk-go/service/sts" +) + +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function to read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple goroutines. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely. +// +// It will wait forever until something is provided on stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName is the name of the AssumeRole provider. +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time (15 minutes) that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time.
+// +// This credential provider will be used by the SDK's default credential chain +// when shared configuration is enabled, and the shared config or shared credentials +// file configures assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value pass to subsequent sessions in a role chain. + TransitiveTagKeys []*string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider is + // set, an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider are set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause requests to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // MaxJitterFrac reduces the effective Duration of each credential requested + // by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must + // have a value between 0 and 1. Any other value may lead to unexpected behavior. + // With a MaxJitterFrac value of 0 (the default), no jitter will be used. + // + // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the + // AssumeRole call will be made with an arbitrary Duration between 27m and + // 30m. + // + // MaxJitterFrac should not be negative. + MaxJitterFrac float64 +} + +// NewCredentials returns a pointer to a new Credentials value wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring.
+// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000000..cefe2a76d4d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,154 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. +var now = time.Now + +// TokenFetcher should return WebIdentity token bytes or an error +type TokenFetcher interface { + FetchToken(credentials.Context) ([]byte, error) +} + +// FetchTokenPath is a path to a WebIdentity token file +type FetchTokenPath string + +// FetchToken returns a token by reading from the filesystem +func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { + data, err := ioutil.ReadFile(string(f)) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", f) + return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + return data, nil +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + credentials.Expiry + PolicyArns []*sts.PolicyDescriptorType + + // Duration the STS credentials will be valid for. Truncated to seconds. + // If unset, the assumed role will use AssumeRoleWithWebIdentity's default + // expiry duration. See + // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity + // for more information. + Duration time.Duration + + // The amount of time the credentials will be refreshed before they expire. + // This is useful to refresh credentials before they expire, reducing the + // risk of using credentials as they expire. If unset, will default to no + // expiry window. + ExpiryWindow time.Duration + + client stsiface.STSAPI + + tokenFetcher TokenFetcher + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role ARN, and token file path.
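A short sketch of the web identity flow defined below, using only constructors from this file; the role ARN, session name, and token path are placeholders (the path shown is the usual Kubernetes projected service-account token location).

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	// FetchTokenPath reads the OIDC token from disk on every Retrieve, so
	// rotated token files are picked up automatically.
	p := stscreds.NewWebIdentityRoleProvider(
		sts.New(sess),
		"arn:aws:iam::123456789012:role/my-role", // placeholder
		"my-session",                             // placeholder
		"/var/run/secrets/eks.amazonaws.com/serviceaccount/token",
	)

	v, err := credentials.NewCredentials(p).Get()
	if err != nil {
		panic(err)
	}
	fmt.Println("assumed via:", v.ProviderName) // "WebIdentityCredentials"
}
```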
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFetcher: tokenFetcher, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) + if err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + + var duration *int64 + if p.Duration != 0 { + duration = aws.Int64(int64(p.Duration / time.Second)) + } + + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + DurationSeconds: duration, + }) + + req.SetContext(ctx) + + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. 
+ req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 00000000000..25a66d1dda2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. +// +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. +// r.InjectHandlers(&sess.Handlers) +// +// Controlling CSM client +// +// Once the CSM client has been enabled the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If Get function is called before the reporter is enabled with the +// Start function or via SDK's Session configuration nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. 
+// r := csm.Get() +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 00000000000..4b19e2800e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,89 @@ +package csm + +import ( + "fmt" + "strings" + "sync" +) + +var ( + lock sync.Mutex +) + +const ( + // DefaultPort is used when no port is specified. + DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" +) + +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only IP6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture +// client side metrics. Calling start multiple time will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// r, err := csm.Start("clientID", "127.0.0.1:31000") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. 
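Putting Start, AddressWithDefaults, and InjectHandlers from the file above together, a minimal sketch of enabling CSM manually (the client ID is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Empty host and port fall back to 127.0.0.1:31000.
	addr := csm.AddressWithDefaults("", "")

	r, err := csm.Start("my-client-id", addr) // placeholder client ID
	if err != nil {
		panic(fmt.Errorf("failed starting CSM: %v", err))
	}

	// Metrics are only published for clients created from handlers that
	// include the reporter's publishing hooks.
	sess := session.Must(session.NewSession())
	r.InjectHandlers(&sess.Handlers)
}
```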
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000000..5bacc791a1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) 
+ m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 00000000000..82a3e345e93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,55 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused *int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + paused: new(int64), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 00000000000..54a99280ce9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 00000000000..835bcd49cba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,264 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
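The Push method in metric_chan.go above is a standard non-blocking channel send: when the buffer is full the metric is dropped rather than stalling the request path. A stripped-down illustration of the same pattern (names are illustrative, not from the diff):

```go
package main

import "fmt"

// tryPush performs a non-blocking send: if the buffered channel is full,
// the value is dropped and false is returned instead of blocking the caller.
func tryPush(ch chan int, v int) bool {
	select {
	case ch <- v:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(tryPush(ch, 1)) // true: buffer had room
	fmt.Println(tryPush(ch, 2)) // false: buffer full, value dropped
}
```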
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from being +// added. It is safe to call concurrently with other calls to Pause, but if +// called concurrently with Continue can lead to unexpected state. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// if called concurrently with Pause can lead to unexpected state. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// InjectHandlers will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior (e.g. duplicate metrics). +// +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) + + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) +} + +// boolIntValue returns 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 00000000000..23bb639e018 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,207 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly; use session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but it +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but it +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but it +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: CredProviders(cfg, handlers), + }) +} + +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// This is useful for applications that need to use some other provider (for +// example, use different environment variables for legacy reasons) but still +// fall back on the default chain of providers, allowing that default chain to +// be automatically updated. +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + +const ( + httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles.
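The CredProviders doc above describes extending the default chain rather than replacing it; a sketch of that pattern, using a StaticProvider as a stand-in for a custom provider (the key values are placeholders):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// Front-load a custom provider, then fall back on the SDK's default
	// providers so future additions to the default chain are picked up.
	providers := append(
		[]credentials.Provider{&credentials.StaticProvider{Value: credentials.Value{
			AccessKeyID:     "AKID",   // placeholder
			SecretAccessKey: "SECRET", // placeholder
		}}},
		defaults.CredProviders(cfg, handlers)...,
	)

	cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers:     providers,
	})
}
```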
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 00000000000..ca0ee1dcc78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 00000000000..4fcb6161848 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So get a +// *string from a string value use the "String" function. This makes it easy to +// to get pointer of a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice for commonly types used in API parameters. The map and slice +// conversion functions use similar naming pattern as the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 00000000000..69fa63dc08f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,250 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. 
+func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/latest/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/latest/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. 
+func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
+func (c *EC2Metadata) Available() bool { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 00000000000..8f35b3464ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,245 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) + +// A EC2Metadata is an EC2 Metadata service Client. 
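The AWS_EC2_METADATA_DISABLED switch described in the package comment is read once, when the client is constructed (see NewClient below). A sketch of the observable behavior, reusing the imports from the first sketch plus "os":

	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)
	// The Send handler has been swapped for one that always errors,
	// so Available reports false without making a network call.
	fmt.Println(svc.Available()) // false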
+type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create an EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create an EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS +// client is able to communicate with the EC2 IMDS API. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 1 * time.Second, + } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) + } + + if u, err := url.Parse(endpoint); err == nil { + // Remove path from the endpoint since it will be added by requests. + // This is an artifact of the SDK adding `/latest` to the endpoint for + // EC2 IMDS, but this is now moved to the operation definition. + u.Path = "" + u.RawPath = "" + endpoint = u.String() + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests.
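	// (The strings.ToLower comparison below is what makes the "true" value
	// case insensitive, as promised in the package comment.)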
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 00000000000..4b29f190bf9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,93 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
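	// (The fresh token is stored first and then re-read from the atomic.Value
	// rather than used directly, so the injected header always reflects the
	// same copy that concurrent handlers will observe.)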
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + t.token.Store(ec2Token{}) + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 00000000000..654fb1ad52d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,216 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. 
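	// (Decoding into a raw map first lets the version field be inspected
	// before committing to a concrete model; only version "3" is handled,
	// by decodeV3Endpoints below.)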
+ modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRegionalS3(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: 
ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 00000000000..0c6fcdb016a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,10484 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). 
+// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "Europe (Frankfurt)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", + }, + "eu-west-1": region{ + Description: "Europe (Ireland)", + }, + "eu-west-2": region{ + Description: "Europe (London)", + }, + "eu-west-3": region{ + Description: "Europe (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "access-analyzer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + 
Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "airflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "amplifybackend": service{ + + Endpoints: endpoints{ + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-dkr-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-dkr-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-dkr-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-dkr-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + 
"api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.fleethub.iot": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "app-integrations": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "chime.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + 
"cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codeguru-reviewer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: 
"codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar-connections": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + 
"fips-us-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "contact-lens": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + 
Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: 
"dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: 
"elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "emr-containers": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + 
Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-prod-ca-central-1": endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-prod-us-east-1": endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-prod-us-east-2": endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-prod-us-west-1": endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-prod-us-west-2": endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "fips-us-east-2": endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + }, + }, + "healthlake": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "identitystore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + 
}, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotwireless": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lookoutvision": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "macie2": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, 
+ "us-east-1-fips": endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + 
"ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: 
"oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "profile": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": 
endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "rekognition-fips.ca-central-1": endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: 
"route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + 
"ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "ca-central-1-fips": endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + 
SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + 
"ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "servicediscovery-fips": endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "sms": service{ 
+ + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-northeast-3": endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": 
endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + 
Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + 
}, + }, + "fips-us-east-2": endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: 
"waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-af-south-1": endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": 
endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + 
"elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + 
Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, 
+ }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "fips-dkr-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-dkr-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: 
"autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
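GovCloud services such as `directconnect` here list an explicit `Hostname` plus `CredentialScope` per region rather than relying on the partition default template. One consequence, sketched below against the same vendored API: the resolved URL and the signing region both come straight from the table, and `StrictMatchingOption` can be used to fail on regions the table does not list instead of synthesizing a hostname from `RegionRegex` and `Defaults` (the region name in the second call is made up for illustration):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        p := endpoints.AwsUsGovPartition()

        // Explicit table entry: hostname and signing region come from the data above.
        re, err := p.EndpointFor("directconnect", "us-gov-east-1")
        if err != nil {
            panic(err)
        }
        fmt.Println(re.URL)           // https://directconnect.us-gov-east-1.amazonaws.com
        fmt.Println(re.SigningRegion) // us-gov-east-1

        // Strict matching: a region absent from the table is an error rather
        // than a RegionRegex/Defaults guess. ("us-gov-central-9" is fictional.)
        _, err = p.EndpointFor("directconnect", "us-gov-central-9",
            endpoints.StrictMatchingOption)
        fmt.Println(err != nil) // true
    }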
"us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "fips-prod-us-gov-east-1": endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-prod-us-gov-west-1": endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "dataplane-us-gov-east-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "dataplane-us-gov-west-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + 
"us-gov-west-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + 
}, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + 
"us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": 
endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + 
"us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: 
[]string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). +func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: 
"iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 00000000000..ca8fc828e15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. 
+ ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. 
+	MachinelearningServiceID = "machinelearning" // Machinelearning.
+	MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+	MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+	MedialiveServiceID = "medialive" // Medialive.
+	MediapackageServiceID = "mediapackage" // Mediapackage.
+	MediastoreServiceID = "mediastore" // Mediastore.
+	MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+	MghServiceID = "mgh" // Mgh.
+	MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+	ModelsLexServiceID = "models.lex" // ModelsLex.
+	MonitoringServiceID = "monitoring" // Monitoring.
+	MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+	NeptuneServiceID = "neptune" // Neptune.
+	OpsworksServiceID = "opsworks" // Opsworks.
+	OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+	OrganizationsServiceID = "organizations" // Organizations.
+	PinpointServiceID = "pinpoint" // Pinpoint.
+	PollyServiceID = "polly" // Polly.
+	RdsServiceID = "rds" // Rds.
+	RedshiftServiceID = "redshift" // Redshift.
+	RekognitionServiceID = "rekognition" // Rekognition.
+	ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+	Route53ServiceID = "route53" // Route53.
+	Route53domainsServiceID = "route53domains" // Route53domains.
+	RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+	RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+	S3ServiceID = "s3" // S3.
+	S3ControlServiceID = "s3-control" // S3Control.
+	SagemakerServiceID = "api.sagemaker" // Sagemaker.
+	SdbServiceID = "sdb" // Sdb.
+	SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+	ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+	ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+	ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+	ShieldServiceID = "shield" // Shield.
+	SmsServiceID = "sms" // Sms.
+	SnowballServiceID = "snowball" // Snowball.
+	SnsServiceID = "sns" // Sns.
+	SqsServiceID = "sqs" // Sqs.
+	SsmServiceID = "ssm" // Ssm.
+	StatesServiceID = "states" // States.
+	StoragegatewayServiceID = "storagegateway" // Storagegateway.
+	StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+	StsServiceID = "sts" // Sts.
+	SupportServiceID = "support" // Support.
+	SwfServiceID = "swf" // Swf.
+	TaggingServiceID = "tagging" // Tagging.
+	TransferServiceID = "transfer" // Transfer.
+	TranslateServiceID = "translate" // Translate.
+	WafServiceID = "waf" // Waf.
+	WafRegionalServiceID = "waf-regional" // WafRegional.
+	WorkdocsServiceID = "workdocs" // Workdocs.
+	WorkmailServiceID = "workmail" // Workmail.
+	WorkspacesServiceID = "workspaces" // Workspaces.
+	XrayServiceID = "xray" // Xray.
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 00000000000..84316b92c05
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK,
+// e.g. AWS Standard (aws), AWS China (aws-cn), and AWS GovCloud (US)
+// (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions
+// interface will give you access to the list of underlying Partitions with
+// the Partitions method. This is helpful if you want to limit the SDK's
+// endpoint resolving to a single partition, or enumerate regions, services,
+// and endpoints in the partition.
+//
+//     resolver := endpoints.DefaultResolver()
+//     partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//     for _, p := range partitions {
+//         fmt.Println("Regions for", p.ID())
+//         for id, _ := range p.Regions() {
+//             fmt.Println("*", id)
+//         }
+//
+//         fmt.Println("Services for", p.ID())
+//         for id, _ := range p.Services() {
+//             fmt.Println("*", id)
+//         }
+//     }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//     myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//         if service == endpoints.S3ServiceID {
+//             return endpoints.ResolvedEndpoint{
+//                 URL:           "s3.custom.endpoint.com",
+//                 SigningRegion: "custom-signing-region",
+//             }, nil
+//         }
+//
+//         return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//     }
+//
+//     sess := session.Must(session.NewSession(&aws.Config{
+//         Region:           aws.String("us-west-2"),
+//         EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//     }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 00000000000..ca956e5f12a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,564 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+	DisableSSL bool
+
+	// Sets the resolver to resolve the endpoint as a dualstack endpoint
+	// for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled, a dualstack endpoint for the service will
+	// be returned. This endpoint may not be valid. If StrictMatching is
+	// enabled, only services that are known to support dualstack will return
+	// dualstack endpoints.
+	UseDualStack bool
+
+	// Enables strict matching of services and regions when resolving
+	// endpoints. If the partition doesn't enumerate the exact service and
+	// region, an error will be returned. This option will prevent returning
+	// endpoints that look valid, but may not resolve to any real endpoint.
+	StrictMatching bool
+
+	// Enables resolving a service endpoint based on the region provided if the
+	// service does not exist. The service endpoint ID will be used as the
+	// service domain name prefix. By default, the endpoint resolver requires
+	// the service to be known when resolving endpoints.
+	//
+	// If resolving an endpoint on the partition list, the provided region will
+	// be used to determine which partition's domain name pattern to combine
+	// with the service endpoint ID. If both the service and region are unknown
+	// when resolving an endpoint on the partition list, an UnknownEndpointError
+	// will be returned.
+	//
+	// If resolving an endpoint on a partition-specific resolver, that
+	// partition's domain name pattern will be used with the service endpoint
+	// ID. If both region and service do not exist when resolving an endpoint
+	// on a specific partition, the partition's domain pattern will be used to
+	// combine the endpoint and region together.
+	//
+	// This option is ignored if StrictMatching is enabled.
+	ResolveUnknownService bool
+
+	// STS Regional Endpoint flag helps with resolving the STS endpoint
+	STSRegionalEndpoint STSRegionalEndpoint
+
+	// S3 Regional Endpoint flag helps with resolving the S3 endpoint
+	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+}
+
+// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
+// options.
+type STSRegionalEndpoint int
+
+func (e STSRegionalEndpoint) String() string {
+	switch e {
+	case LegacySTSEndpoint:
+		return "legacy"
+	case RegionalSTSEndpoint:
+		return "regional"
+	case UnsetSTSEndpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
+	UnsetSTSEndpoint STSRegionalEndpoint = iota
+
+	// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
+	// to use legacy endpoints.
+	LegacySTSEndpoint
+
+	// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
+	// to use regional endpoints.
+	RegionalSTSEndpoint
+)
+
+// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy` and `regional` are the only valid, case-insensitive strings for
+// resolving the STS Regional Endpoint flag.
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacySTSEndpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalSTSEndpoint, nil
+	default:
+		return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
+	}
+}
+
+// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
+// Regional Endpoint options.
+type S3UsEast1RegionalEndpoint int
+
+func (e S3UsEast1RegionalEndpoint) String() string {
+	switch e {
+	case LegacyS3UsEast1Endpoint:
+		return "legacy"
+	case RegionalS3UsEast1Endpoint:
+		return "regional"
+	case UnsetS3UsEast1Endpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
+	// specified.
+	UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
+
+	// LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+	// specified to use legacy endpoints.
+	LegacyS3UsEast1Endpoint
+
+	// RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+	// specified to use regional endpoints.
+	RegionalS3UsEast1Endpoint
+)
+
+// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy` and `regional` are the only valid, case-insensitive strings for
+// resolving the S3 Regional Endpoint flag.
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacyS3UsEast1Endpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalS3UsEast1Endpoint, nil
+	default:
+		return UnsetS3UsEast1Endpoint,
+			fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
+	}
+}
+
+// Set applies each of the option functions to the Options value.
+func (o *Options) Set(optFns ...func(*Options)) {
+	for _, fn := range optFns {
+		fn(o)
+	}
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+	o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+	o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+	o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
+
+// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
+// STS endpoints to their regional endpoints, instead of the global endpoint.
+func STSRegionalEndpointOption(o *Options) {
+	o.STSRegionalEndpoint = RegionalSTSEndpoint
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default
+// HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//     rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//     rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !ok {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+	id, dnsSuffix string
+	p             *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata, the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services, set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled,
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved, an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled, the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned:
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+	rs := make(map[string]Region, len(p.p.Regions))
+	for id, r := range p.p.Regions {
+		rs[id] = Region{
+			id:   id,
+			desc: r.Description,
+			p:    p.p,
+		}
+	}
+
+	return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+	ss := make(map[string]Service, len(p.p.Services))
+	for id := range p.p.Services {
+		ss[id] = Service{
+			id: id,
+			p:  p.p,
+		}
+	}
+
+	return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+	id, desc string
+	p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id, s := range r.p.Services {
+		if _, ok := s.Endpoints[r.id]; ok {
+			ss[id] = Service{
+				id: id,
+				p:  r.p,
+			}
+		}
+	}
+
+	return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+	id string
+	p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+	for id := range s.p.Services[s.id].Endpoints {
+		if r, ok := s.p.Regions[id]; ok {
+			rs[id] = Region{
+				id:   id,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+	for id := range s.p.Services[s.id].Endpoints {
+		es[id] = Endpoint{
+			id:        id,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+	// The endpoint URL
+	URL string
+
+	// The endpoint partition
+	PartitionID string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+	return UnknownServiceError{
+		awsError: awserr.New("UnknownServiceError",
+			"could not resolve endpoint for unknown service", nil),
+		Partition: p,
+		Service:   s,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q",
+		e.Partition, e.Service)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+	return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+	Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+	return UnknownEndpointError{
+		awsError: awserr.New("UnknownEndpointError",
+			"could not resolve endpoint", nil),
+		Partition: p,
+		Service:   s,
+		Region:    r,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+		e.Partition, e.Service, e.Region)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+	return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
new file mode 100644
index 00000000000..df75e899adb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
@@ -0,0 +1,24 @@
+package endpoints
+
+var legacyGlobalRegions = map[string]map[string]struct{}{
+	"sts": {
+		"ap-northeast-1": {},
+		"ap-south-1":     {},
+		"ap-southeast-1": {},
+		"ap-southeast-2": {},
+		"ca-central-1":   {},
+		"eu-central-1":   {},
+		"eu-north-1":     {},
+		"eu-west-1":      {},
+		"eu-west-2":      {},
+		"eu-west-3":      {},
+		"sa-east-1":      {},
+		"us-east-1":      {},
+		"us-east-2":      {},
+		"us-west-1":      {},
+		"us-west-2":      {},
+	},
+	"s3": {
+		"us-east-1": {},
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 00000000000..773613722f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,351 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	var opt Options
+	opt.Set(opts...)
+
+	for i := 0; i < len(ps); i++ {
+		if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+			continue
+		}
+
+		return ps[i].EndpointFor(service, region, opts...)
+	}
+
+	// If loose matching, fall back to the first partition's format when
+	// resolving the endpoint.
+	if !opt.StrictMatching && len(ps) > 0 {
+		return ps[0].EndpointFor(service, region, opts...)
+	}
+
+	return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + + e, hasEndpoint := s.endpointForRegion(region) + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s 
*service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + region = signingRegion + } + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} 
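+
+// Editor's note (illustrative, not part of the upstream source): resolve
+// expands a modeled hostname template by substituting the service, region,
+// and partition DNS suffix. With the common template
+// "{service}.{region}.{dnsSuffix}", service "sqs", region "us-west-2", and
+// DNS suffix "amazonaws.com", the default https scheme yields:
+//
+//	https://sqs.us-west-2.amazonaws.com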
+ +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 00000000000..0fdfcc56e05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) 
+ + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. 
+ const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . 
-}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 00000000000..fa06f7a8f8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 00000000000..91a6f277a7e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 00000000000..6ed15b2ecc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. 
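+//
+// For example (editor's illustration):
+//
+//	l := aws.LogLevel(aws.LogDebugWithHTTPBody)
+//	l.AtLeast(aws.LogDebug) // true, debug sub levels sort above LogDebug
+//	l.AtLeast(aws.LogOff)   // true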
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// will be retried. This should be used when you want visibility into retried
+	// service requests. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+	LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//		fmt.Fprintln(os.Stdout, args...)
+//	})})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 00000000000..2ba3c56c11f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "use of closed network connection") || + strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 00000000000..e819ab6c0e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,343 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + BuildStream HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList +} + +// Copy returns a copy of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers. +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.BuildStream.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. 
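+//
+// A HandlerListRunItem is passed to a HandlerList's AfterEachFn, when one is
+// set, after each handler runs; HandlerListLogItem and HandlerListStopOnError
+// below are ready-made AfterEachFn implementations.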
+type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. 
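+//
+// For example (editor's sketch; "core.SendHandler" stands in for the name of
+// an existing handler in the list):
+//
+//	swapped := handlers.Send.Swap("core.SendHandler", request.NamedHandler{
+//		Name: "myapp.SendHandler",
+//		Fn:   func(r *request.Request) { /* custom send logic */ },
+//	})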
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. 
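+//
+// For example (editor's illustration, following the option examples above;
+// the header name is a placeholder):
+//
+//	svc.PutObjectWithContext(ctx, params,
+//		request.WithSetRequestHeaders(map[string]string{"X-Custom-Header": "value"}),
+//	)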
+func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 00000000000..79f79602b03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 00000000000..9370fa50c38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 00000000000..d597c6ead55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,698 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. 
+ ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. 
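+//
+// A hypothetical call site (editor's sketch; GetWidget, input, and the cfg,
+// info, handlers, and retryer values are placeholders normally supplied by a
+// generated service client):
+//
+//	op := &request.Operation{Name: "GetWidget", HTTPMethod: "POST", HTTPPath: "/"}
+//	req := request.New(cfg, info, handlers, retryer, op, input, &GetWidgetOutput{})
+//	err := req.Send()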
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	if retryer == nil {
+		retryer = noOpRetryer{}
+	}
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+
+	var err error
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+	if err != nil {
+		httpReq.URL = &url.URL{}
+		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+	}
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       err,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//	var id2, versionID string
+//	svc.PutObjectWithContext(ctx, params,
+//		request.WithGetResponseHeader("x-amz-id-2", &id2),
+//		request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//	)
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//	var headers http.Header
+//	svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
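+//
+// A minimal sketch (editor's illustration; requires Go 1.7+ for the context
+// package):
+//
+//	ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 10*time.Second)
+//	defer cancel()
+//	req.SetContext(ctx)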
+// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. 
The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. 
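+//
+// For example (editor's sketch; req is a *Request produced by a service
+// client, and the signed HTTP request is then sent with a plain http.Client):
+//
+//	if err := req.Sign(); err != nil {
+//		return err
+//	}
+//	resp, err := http.DefaultClient.Do(req.HTTPRequest)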
+func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. 
+ r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. 
+// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 00000000000..e36e468b7c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 00000000000..de1292f45a2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,36 @@ +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. 
When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000000..a7365cd1e46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 00000000000..307fa0705be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000000..64784e16f3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. 
+ // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
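+//
+// A sketch of the shape this produces (hypothetical operation and values):
+// with Operation.OutputTokens = []string{"NextToken"} and response data of
+// &ListOutput{NextToken: aws.String("abc")}, it returns
+// []interface{}{aws.String("abc")}; once the token is empty or unset it
+// returns nil, which ends pagination.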
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations")
+
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 00000000000..752ae47f845
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,309 @@
+package request
+
+import (
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer provides the interface to drive the SDK's request retry behavior.
+// The Retryer implementation is responsible for implementing exponential
+// backoff, and determining if a request API error should be retried.
+//
+// client.DefaultRetryer is the SDK's default implementation of the Retryer.
+// It uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods
+// to determine if the request is retried.
+type Retryer interface {
+	// RetryRules returns the retry delay that should be used by the SDK before
+	// making another request attempt for the failed request.
+	RetryRules(*Request) time.Duration
+
+	// ShouldRetry returns whether the failed request is retryable.
+	//
+	// Implementations may consider request attempt count when determining if a
+	// request is retryable, but the SDK will use MaxRetries to limit the
+	// number of attempts a request is made.
+	ShouldRetry(*Request) bool
+
+	// MaxRetries is the number of times a request may be retried before
+	// failing.
+	MaxRetries() int
+}
+
+// WithRetryer sets a Retryer value to the given Config returning the Config
+// value for chaining. The value must not be nil.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	if retryer == nil {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
+		}
+		retryer = noOpRetryer{}
+	}
+	cfg.Retryer = retryer
+	return cfg
+}
+
+// noOpRetryer is an internal no-op retryer used when a request is created
+// without a retryer.
+//
+// Provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type noOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the retryer will allow for
+// an individual API request; for noOpRetryer MaxRetries will always be zero.
+func (d noOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d noOpRetryer) ShouldRetry(_ *Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
+	return 0
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
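+//
+// Whether these codes are consulted at all depends on the configured Retryer;
+// WithRetryer above swaps the implementation from the caller's side. A
+// minimal sketch (illustrative, assuming the SDK's client package, which
+// provides DefaultRetryer):
+//
+//	cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{NumMaxRetries: 3})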
+var retryableCodes = map[string]struct{}{ + ErrCodeRequestError: {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. 
+ return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000000..09a44eb987a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. 
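+//
+// See WithResponseReadTimeout below for the request Option that installs this
+// reader onto a response body.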
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, timeoutErr
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+const (
+	// HandlerResponseTimeout is what we use to signify the name of the
+	// response timeout handler.
+	HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level
+// error with an ErrCodeResponseTimeout error, if its underlying error is one.
+func adaptToResponseTimeoutError(req *Request) {
+	if err, ok := req.Error.(awserr.Error); ok {
+		aerr, ok := err.OrigErr().(awserr.Error)
+		if ok && aerr.Code() == ErrCodeResponseTimeout {
+			req.Error = aerr
+		}
+	}
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per-read timeouts. If a timeout occurs, the error returned
+// will have the code ErrCodeResponseTimeout.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+	return func(r *Request) {
+		var timeoutHandler = NamedHandler{
+			HandlerResponseTimeout,
+			func(req *Request) {
+				req.HTTPResponse.Body = &timeoutReadCloser{
+					reader:   req.HTTPResponse.Body,
+					duration: duration,
+				}
+			}}
+
+		// Remove the handler so we are not stomping over any new durations.
+		r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+		r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+		r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+		r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 00000000000..8630683f317
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// InvalidParameterErrCode is the error code for invalid parameters errors
+	InvalidParameterErrCode = "InvalidParameter"
+	// ParamRequiredErrCode is the error code for required parameter errors
+	ParamRequiredErrCode = "ParamRequiredError"
+	// ParamMinValueErrCode is the error code for fields with too low of a
+	// number value.
+	ParamMinValueErrCode = "ParamMinValueError"
+	// ParamMinLenErrCode is the error code for fields without enough elements.
+	ParamMinLenErrCode = "ParamMinLenError"
+	// ParamMaxLenErrCode is the error code for value being too long.
+	ParamMaxLenErrCode = "ParamMaxLenError"
+
+	// ParamFormatErrCode is the error code for a field with invalid
+	// format or characters.
+	ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
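+//
+// A minimal sketch of an implementation (hypothetical input type; the SDK's
+// generated API input types follow this pattern):
+//
+//	func (in *HypotheticalInput) Validate() error {
+//		invalidParams := ErrInvalidParams{Context: "HypotheticalInput"}
+//		if in.Name == nil {
+//			invalidParams.Add(NewErrParamRequired("Name"))
+//		}
+//		if invalidParams.Len() > 0 {
+//			return invalidParams
+//		}
+//		return nil
+//	}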
+type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." 
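+	// The final shape is "context.nestedContext.field", for example a
+	// hypothetical "ListInput.Filter.Name"; illustrative only.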
+ } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required minimum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents a invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. 
+func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 00000000000..4601f883cc5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. 
+func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. 
+ if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000000..3ddd4e51282 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,290 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
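+// (Both environment variables must be set together, e.g. with illustrative
+// values:
+//
+//	AWS_ROLE_ARN=arn:aws:iam::123456789012:role/example
+//	AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/token
+//
+// otherwise assumeWebIdentity below returns one of these errors.)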
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case sharedCfg.hasSSOConfiguration(): + creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. 
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) { + if err := sharedCfg.validateSSOConfiguration(); err != nil { + return nil, err + } + + cfgCopy := cfg.Copy() + cfgCopy.Region = &sharedCfg.SSORegion + + return ssocreds.NewCredentials( + &Session{ + Config: cfgCopy, + Handlers: handlers.Copy(), + }, + sharedCfg.SSOAccountID, + sharedCfg.SSORoleName, + sharedCfg.SSOStartURL, + ), nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. 
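+//
+// A sketch of satisfying this option (mirroring the session package docs;
+// stscreds.StdinTokenProvider is one ready-made provider):
+//
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+//	}))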
+type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go new file mode 100644 index 00000000000..593aedc4218 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go @@ -0,0 +1,27 @@ +// +build go1.13 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go new file mode 100644 index 00000000000..1bf31cf8e56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go @@ -0,0 +1,26 @@ +// +build !go1.13,go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go new file mode 100644 index 00000000000..253d7bc9d55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
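+// (Go 1.5 variant: unlike the go1.6+ transports in the sibling files, this
+// one cannot set ExpectContinueTimeout, which Go 1.5's http.Transport lacked.)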
+func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go new file mode 100644 index 00000000000..db240605441 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 00000000000..9419b518d58 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,289 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: + + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) + +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. 
+ + // Create Session + sess, err := session.NewSession() + + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.NewSession() + sess, err := session.NewSessionWithOptions(session.Options{ + // Options + }) + + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", + + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, + + // Force enable Shared Config support + SharedConfigState: session.SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Params: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Shared Config Fields + +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +option is used to create the Session the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. + +If both config files are present the configuration from both files will be +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). + +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. 
If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Custom Shared Config and Credential Files + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Custom CA Bundle + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. 
+
+    AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option with a custom HTTP client, the HTTP client needs to be provided
+when creating the session, not the service client.
+
+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or either fails to load, an error will be returned.
+
+The HTTP Client's Transport concrete implementation must be a http.Transport
+or creating the session will fail.
+
+    AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+    AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+    sess, err := session.NewSessionWithOptions(session.Options{
+        ClientTLSCert: myCertFile,
+        ClientTLSKey:  myKeyFile,
+    })
+
+Custom EC2 IMDS Endpoint
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using a URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+    sess, err := session.NewSessionWithOptions(session.Options{
+        EC2IMDSEndpoint: "http://[::1]",
+    })
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 00000000000..3cd5d4b5ae1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,378 @@
+package session
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. Session Token can optionally also be provided, but is
+	// not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to. If it
+	// is not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option with a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session, not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
+	csmEnabled  string
+	CSMEnabled  *bool
+	CSMPort     string
+	CSMHost     string
+	CSMClientID string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery *bool
+	enableEndpointDiscovery string
+
+	// Specifies the WebIdentity token the SDK should use to assume a role
+	// with.
+	//
+	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role ARN to use when assuming a role.
+	//
+	// AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+	//
+	// AWS_ROLE_SESSION_NAME=session_name
+	RoleSessionName string
+
+	// Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint
+	// for a service.
+	//
+	// AWS_STS_REGIONAL_ENDPOINTS=regional
+	// This can take the value `regional` or `legacy`.
+	STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+	// Specifies the S3 Regional Endpoint flag for the SDK to resolve the
+	// endpoint for a service.
+	//
+	// AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
+	// This can take the value `regional` or `legacy`.
+	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// AWS_S3_USE_ARN_REGION=true
+	S3UseARNRegion bool
+
+	// Specifies the alternative endpoint to use for EC2 IMDS.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	EC2IMDSEndpoint string
+}
+
+var (
+	csmEnabledEnvKey = []string{
+		"AWS_CSM_ENABLED",
+	}
+	csmHostEnvKey = []string{
+		"AWS_CSM_HOST",
+	}
+	csmPortEnvKey = []string{
+		"AWS_CSM_PORT",
+	}
+	csmClientIDEnvKey = []string{
+		"AWS_CSM_CLIENT_ID",
+	}
+	credAccessEnvKey = []string{
+		"AWS_ACCESS_KEY_ID",
+		"AWS_ACCESS_KEY",
+	}
+	credSecretEnvKey = []string{
+		"AWS_SECRET_ACCESS_KEY",
+		"AWS_SECRET_KEY",
+	}
+	credSessionEnvKey = []string{
+		"AWS_SESSION_TOKEN",
+	}
+
+	enableEndpointDiscoveryEnvKey = []string{
+		"AWS_ENABLE_ENDPOINT_DISCOVERY",
+	}
+
+	regionEnvKeys = []string{
+		"AWS_REGION",
+		"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	profileEnvKeys = []string{
+		"AWS_PROFILE",
+		"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	sharedCredsFileEnvKey = []string{
+		"AWS_SHARED_CREDENTIALS_FILE",
+	}
+	sharedConfigFileEnvKey = []string{
+		"AWS_CONFIG_FILE",
+	}
+	webIdentityTokenFilePathEnvKey = []string{
+		"AWS_WEB_IDENTITY_TOKEN_FILE",
+	}
+	roleARNEnvKey = []string{
+		"AWS_ROLE_ARN",
+	}
+	roleSessionNameEnvKey = []string{
+		"AWS_ROLE_SESSION_NAME",
+	}
+	stsRegionalEndpointKey = []string{
+		"AWS_STS_REGIONAL_ENDPOINTS",
+	}
+	s3UsEast1RegionalEndpoint = []string{
+		"AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
+	}
+	s3UseARNRegionEnvKey = []string{
+		"AWS_S3_USE_ARN_REGION",
+	}
+	ec2IMDSEndpointEnvKey = []string{
+		"AWS_EC2_METADATA_SERVICE_ENDPOINT",
+	}
+	useCABundleKey = []string{
+		"AWS_CA_BUNDLE",
+	}
+	useClientTLSCert = []string{
+		"AWS_SDK_GO_CLIENT_TLS_CERT",
+	}
+	useClientTLSKey = []string{
+		"AWS_SDK_GO_CLIENT_TLS_KEY",
+	}
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() (envConfig, error) {
+	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+	return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() (envConfig, error) {
+	return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
+	cfg := envConfig{}
+
+	cfg.EnableSharedConfig = enableSharedConfig
+
+	// Static environment credentials
+	var creds credentials.Value
+	setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
+	setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
+	setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
+	if creds.HasKeys() {
+		// Require logical grouping of credentials
+		creds.ProviderName = EnvProviderName
+		cfg.Creds = creds
+	}
+
+	// Role Metadata
+	setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
+	setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
+
+	// Web identity environment variables
+	setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
+
+	// CSM environment variables
+	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+	setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
+	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+
+	if len(cfg.csmEnabled) != 0 {
+		v, _ := strconv.ParseBool(cfg.csmEnabled)
+		cfg.CSMEnabled = &v
+	}
+
+	regionKeys := regionEnvKeys
+	profileKeys := profileEnvKeys
+	if !cfg.EnableSharedConfig {
+		regionKeys = regionKeys[:1]
+		profileKeys = profileKeys[:1]
+	}
+
+	setFromEnvVal(&cfg.Region, regionKeys)
+	setFromEnvVal(&cfg.Profile, profileKeys)
+
+	// Endpoint discovery is in reference to it being enabled; any value other
+	// than "false" is treated as enabled.
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) + setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) + setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 00000000000..038ae222ffc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,912 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" + + // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle. 
+	ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+	// ErrCodeLoadClientTLSCert error code for unable to load client TLS
+	// certificate or key
+	ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+
+	options Options
+}
+
+// New creates a new instance of the handlers, merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the
+// New method may now encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg, envErr := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		var cfg aws.Config
+		cfg.MergeIn(cfgs...)
+		s, err := NewSessionWithOptions(Options{
+			Config:            cfg,
+			SharedConfigState: SharedConfigEnable,
+		})
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then. This
+			// needs to be replicated if an error occurs while creating
+			// the session.
+			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
" + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + + return s + } + + s := deprecatedNewSession(envCfg, cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). 
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It overrides the environment variables AWS_SHARED_CREDENTIALS_FILE
+	// and AWS_CONFIG_FILE.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field. If
+	// it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set, as documented in
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	// ca_bundle = $HOME/ca_bundle
+	CustomCABundle io.Reader
+
+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+	// transport when making requests. The key must be paired with a TLS client
+	// certificate file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey io.Reader
+
+	// The handlers that the session and all API clients will be created with.
+	// This must be a complete set of handlers. Use the defaults.Handlers()
+	// function to initialize this value before changing the handlers to be
+	// used by the SDK.
+	Handlers request.Handlers
+
+	// Allows specifying a custom endpoint to be used by the EC2 IMDS client
+	// when making requests to the EC2 IMDS API. The endpoint value must
+	// include a protocol prefix.
+	//
+	// If unset, the EC2 IMDS client will use its default endpoint.
+	//
+	// Can also be specified via the environment variable,
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+	//
+	// If using a URL with an IPv6 address literal, the IPv6 address
+	// component must be enclosed in square brackets.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	EC2IMDSEndpoint string
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to retrieve credentials with AssumeRole set in the config.
+// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS. +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } + return resolver.EndpointFor(service, region) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + + if len(envCfg.EC2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint) + } + + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) 
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+		options: Options{
+			EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
+		},
+	}
+
+	initHandlers(s)
+	return s
+}
+
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
+	if logger != nil {
+		logger.Log("Enabling CSM")
+	}
+
+	r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
+	if err != nil {
+		return err
+	}
+	r.InjectHandlers(handlers)
+
+	return nil
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+	cfg := defaults.Config()
+
+	handlers := opts.Handlers
+	if handlers.IsEmpty() {
+		handlers = defaults.Handlers()
+	}
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were set.
+	userCfg := &aws.Config{}
+	userCfg.MergeIn(cfgs...)
+	cfg.MergeIn(userCfg)
+
+	// Ordered config files will be loaded in with later files overwriting
+	// previous config file values.
+	var cfgFiles []string
+	if opts.SharedConfigFiles != nil {
+		cfgFiles = opts.SharedConfigFiles
+	} else {
+		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+		if !envCfg.EnableSharedConfig {
+			// The shared config file (~/.aws/config) is only loaded if instructed
+			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+			cfgFiles = cfgFiles[1:]
+		}
+	}
+
+	// Load additional config from file(s)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
+	if err != nil {
+		if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+			// Special case where the user has not explicitly specified an AWS_PROFILE,
+			// or session.Options.Profile, shared config is not enabled, and the
+			// environment has credentials; allow the shared config file to fail to
+			// load since the user has already provided credentials, and nothing else
+			// is required to be read from the file. (Github aws/aws-sdk-go#2455)
+		} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return nil, err
+		}
+	}
+
+	if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+		return nil, err
+	}
+
+	if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+		return nil, err
+	}
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+		options:  opts,
+	}
+
+	initHandlers(s)
+
+	if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return s, nil
+}
+
+type csmConfig struct {
+	Enabled  bool
+	Host     string
+	Port     string
+	ClientID string
+}
+
+var csmProfileName = "aws_csm"
+
+func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
+	if envCfg.CSMEnabled != nil {
+		if *envCfg.CSMEnabled {
+			return csmConfig{
+				Enabled:  true,
+				ClientID: envCfg.CSMClientID,
+				Host:     envCfg.CSMHost,
+				Port:     envCfg.CSMPort,
+			}, nil
+		}
+		return csmConfig{}, nil
+	}
+
+	sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return csmConfig{}, err
+		}
+	}
+	if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true {
+		return csmConfig{
+			Enabled:  true,
+			ClientID: sharedCfg.CSMClientID,
+			Host:     sharedCfg.CSMHost,
+			Port:     sharedCfg.CSMPort,
+		}, nil
+	}
+
+	return csmConfig{}, nil
+}
+
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+	// The CA bundle can be specified in both the environment variable and the
+	// shared config file.
+	var caBundleFilename = envCfg.CustomCABundle
+	if len(caBundleFilename) == 0 {
+		caBundleFilename = sharedCfg.CustomCABundle
+	}
+
+	// Only use the environment value if the session option is not provided.
+	customTLSOptions := map[string]struct {
+		filename string
+		field    *io.Reader
+		errCode  string
+	}{
+		"custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+		"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+		"custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+	}
+	for name, v := range customTLSOptions {
+		if len(v.filename) != 0 && *v.field == nil {
+			f, err := os.Open(v.filename)
+			if err != nil {
+				return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+			}
+			defer f.Close()
+			*v.field = f
+		}
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+			return err
+		}
+	}
+
+	// Setup HTTP client TLS certificate and key for client TLS authentication.
+	if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+		if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+			return err
+		}
+	} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+		// Do nothing if neither value is available.
+
+	} else {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+				opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+	}
+
+	return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
+	var t *http.Transport
+	switch v := client.Transport.(type) {
+	case *http.Transport:
+		t = v
+	default:
+		if client.Transport != nil {
+			return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
+		}
+	}
+	if t == nil {
+		// A nil transport implies `http.DefaultTransport` should be used. Since
+		// the SDK cannot modify, nor copy the `DefaultTransport`, specifying
+		// equivalent values is the next closest behavior.
+		t = getCustomTransport()
+	}
+
+	return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadCustomCABundle,
+			"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
+	}
+
+	p, err := loadCertPool(bundle)
+	if err != nil {
+		return err
+	}
+	if t.TLSClientConfig == nil {
+		t.TLSClientConfig = &tls.Config{}
+	}
+	t.TLSClientConfig.RootCAs = p
+
+	client.Transport = t
+
+	return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
+			"failed to read custom CA bundle PEM file", err)
+	}
+
+	p := x509.NewCertPool()
+	if !p.AppendCertsFromPEM(b) {
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
+			"failed to load custom CA bundle PEM file", err)
+	}
+
+	return p, nil
+}
+
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to get usable HTTP transport from client", err)
+	}
+
+	cert, err := ioutil.ReadAll(certFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS cert file", err)
+	}
+
+	key, err := ioutil.ReadAll(keyFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS key file", err)
+	}
+
+	clientCert, err := tls.X509KeyPair(cert, key)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to load x509 key pair from client cert", err)
+	}
+
+	tlsCfg := t.TLSClientConfig
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{}
+	}
+
+	tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+	t.TLSClientConfig = tlsCfg
+	client.Transport = t
+
+	return nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) error {
+
+	// Region if not already set by user
+	if len(aws.StringValue(cfg.Region)) == 0 {
+		if len(envCfg.Region) > 0 {
+			cfg.WithRegion(envCfg.Region)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+			cfg.WithRegion(sharedCfg.Region)
+		}
+	}
+
+	if cfg.EnableEndpointDiscovery == nil {
+		if envCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+		} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+		}
+	}
+
+	// Regional Endpoint flag for STS endpoint resolving
+	mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
+		userCfg.STSRegionalEndpoint,
+		envCfg.STSRegionalEndpoint,
+		sharedCfg.STSRegionalEndpoint,
+
endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint + if len(ec2IMDSEndpoint) == 0 { + ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint + } + if len(ec2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint) + } + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. 
+ return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +const ec2MetadataServiceID = "ec2metadata" + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) 
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 00000000000..c3f38b6ec07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,642 @@ +package session + +import ( + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // AWS Single Sign-On (AWS SSO) group + ssoAccountIDKey = "sso_account_id" + ssoRegionKey = "sso_region" + ssoRoleNameKey = "sso_role_name" + ssoStartURL = "sso_start_url" + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // custom CA Bundle filename + customCABundleKey = `ca_bundle` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + Profile string + + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. 
+ // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + SSOAccountID string + SSORegion string + SSORoleName string + SSOStartURL string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. + // + // region + Region string + + // CustomCABundle is the file path to a PEM file the SDK will read and + // use to configure the HTTP transport with additional CA certs that are + // not present in the platforms default CA store. + // + // This value will be ignored if the file does not exist. + // + // ca_bundle + CustomCABundle string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. 
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+	if len(profile) == 0 {
+		profile = DefaultSharedConfigProfile
+	}
+
+	files, err := loadSharedConfigIniFiles(filenames)
+	if err != nil {
+		return sharedConfig{}, err
+	}
+
+	cfg := sharedConfig{}
+	profiles := map[string]struct{}{}
+	if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+		return sharedConfig{}, err
+	}
+
+	return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+	files := make([]sharedConfigFile, 0, len(filenames))
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+			// Skip files which can't be opened and read for whatever reason
+			continue
+		} else if err != nil {
+			return nil, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		files = append(files, sharedConfigFile{
+			Filename: filename, IniData: sections,
+		})
+	}
+
+	return files, nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+	cfg.Profile = profile
+
+	// Trim files from the list that don't exist.
+	var skippedFiles int
+	var profileNotFoundErr error
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles not defined in individual files.
+				profileNotFoundErr = err
+				skippedFiles++
+				continue
+			}
+			return err
+		}
+	}
+	if skippedFiles == len(files) {
+		// If all files were skipped because the profile is not found, return
+		// the original profile not found error.
+		return profileNotFoundErr
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// If this is the second instance of the profile, the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self-linked instance of the
+		// profile only has credential provider options.
+		cfg.clearAssumeRoleOptions()
+	} else {
+		// The first time a profile is seen it must either be assume role
+		// credentials or SSO. Assert that if the credential type requires a
+		// role ARN, the ARN is also set, or validate that the SSO
+		// configuration is complete.
+		if err := cfg.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+	profiles[profile] = struct{}{}
+
+	if err := cfg.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(cfg.SourceProfileName) != 0 {
+		// A profile linked via source_profile ignores credential provider
+		// options; the source profile must provide the credentials.
+		cfg.clearCredentialOptions()
+
+		srcCfg := &sharedConfig{}
+		err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+		if err != nil {
+			// A SourceProfile that doesn't exist is an error in configuration.
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN:       cfg.RoleARN,
+					SourceProfile: cfg.SourceProfileName,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN:       cfg.RoleARN,
+				SourceProfile: cfg.SourceProfileName,
+			}
+		}
+
+		cfg.SourceProfile = srcCfg
+	}
+
+	return nil
+}
+
+// setFromIniFile loads the configuration from the file using the profile
A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to to alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + updateString(&cfg.CustomCABundle, section, customCABundleKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } + + // AWS Single Sign-On (AWS SSO) + updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) + updateString(&cfg.SSORegion, section, ssoRegionKey) + updateString(&cfg.SSORoleName, section, ssoRoleNameKey) + updateString(&cfg.SSOStartURL, section, ssoStartURL) + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 
0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + cfg.hasSSOConfiguration(), + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) validateSSOConfiguration() error { + if !cfg.hasSSOConfiguration() { + return nil + } + + var missing []string + if len(cfg.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(cfg.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(cfg.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(cfg.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + cfg.Profile, strings.Join(missing, ", ")) + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.hasSSOConfiguration(): + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func (cfg *sharedConfig) hasSSOConfiguration() bool { + switch { + case len(cfg.SSOAccountID) != 0: + case len(cfg.SSORegion) != 0: + case len(cfg.SSORoleName) != 0: + case len(cfg.SSOStartURL) != 0: + default: + return false + } + return true +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. 
+func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string + SourceProfile string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
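These error types implement awserr.Error, so callers can branch on the Code() strings rather than on concrete types. A hedged sketch (whether these errors surface directly depends on how the session is constructed; the profile name is hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	_, err := session.NewSessionWithOptions(session.Options{
		Profile:           "no-such-profile", // hypothetical
		SharedConfigState: session.SharedConfigEnable,
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "SharedConfigLoadError":
			fmt.Println("a config file exists but could not be opened or parsed")
		case "SharedConfigProfileNotExistsError":
			fmt.Println("the requested profile is not defined in any file")
		default:
			log.Println("other config error:", aerr)
		}
	}
}
```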
+func (e CredentialRequiresARNError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 00000000000..07ea799fbd3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+	"github.com/aws/aws-sdk-go/internal/strings"
+)
+
+// rules houses the set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rule
+// applies to the value; supports nested rules
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks that the value is NOT within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 00000000000..6aa2ed241bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field to
+// true on the signer.
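WithUnsignedPayload follows the functional-option pattern used throughout this package: any `func(*Signer)` can be passed to `NewSigner` (defined later in this file), including custom closures. A minimal sketch with placeholder credentials:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "") // placeholder values

	// Options compose; custom ones are just closures over *Signer.
	withDebugLogging := func(s *v4.Signer) {
		s.Debug = aws.LogDebugWithSigning
		s.Logger = aws.NewDefaultLogger()
	}

	signer := v4.NewSigner(creds, v4.WithUnsignedPayload, withDebugLogging)
	_ = signer
}
```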
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 00000000000..f35fc860b3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 00000000000..fed5c859ca6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 00000000000..02cbd97e234 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 00000000000..bd082e9d1f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + 
}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 00000000000..d71f7b3f4fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,846 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method, using the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing it. This will help prevent
+// signature validation errors, and can be done by setting URL.Opaque or
+// URL.RawPath. The SDK will use URL.Opaque first and then call
+// URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2
+// through 1.7.4, you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP/2 requests using an absolute URL in the
+// HTTP message. URL.Opaque generally will force Go to make requests with an
+// absolute URL. URL.RawPath does not do this, but RawPath must be a valid
+// escaping of Path or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
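To make the standalone usage concrete, here is a minimal sketch that header-signs a request, assuming placeholder credentials and a DynamoDB endpoint chosen purely for illustration; a real caller should also pre-escape the path as described above.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "") // placeholder
	signer := v4.NewSigner(creds)

	body := strings.NewReader(`{}`) // io.ReadSeeker, so the body hash can be computed
	req, err := http.NewRequest("POST", "https://dynamodb.us-west-2.amazonaws.com/", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/x-amz-json-1.0")

	// Sign adds X-Amz-Date and the Authorization header in place.
	if _, err := signer.Sign(req, body, "dynamodb", "us-west-2", time.Now()); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Header.Get("Authorization"))
}
```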
+package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + awsV4Request = "aws4_request" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. 
See
+	// aws.LogLevelType for more information on available logging levels.
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. You need to ensure that the underlying data of the
+	// Body values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and
+// optional option values provided. If no options are provided the Signer
+// will use its default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues      credentials.Value
+	isPresign       bool
+	unsignedPayload bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, the service name and
+// region the request is made to, and the time the request is signed at. The
+// signTime allows you to specify that a request is signed for the future, and
+// cannot be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way that the header values on the
+// request will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, the service name and
+// region the request is made to, and the time the request is signed at. The
+// signTime allows you to specify that a request is signed for the future, and
+// cannot be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// values instead of header values. This allows you to share the Presigned
+// Request's URL with third parties, or distribute it throughout your system
+// with minimal dependencies.
+//
+// Presign also takes an exp value which is the duration the signed request
+// will be valid for after the signing time. This allows you to set when the
+// request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
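For example, presigning a hypothetical S3 GET (bucket and key are placeholders) could look like the following sketch; the signed query string ends up on req.URL:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", "")) // placeholder

	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some/key", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Valid for 15 minutes from the signing time. Per the note above, S3
	// presigned requests use UNSIGNED-PAYLOAD for the body hash by default.
	if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.URL.String()) // shareable, time-limited URL
}
```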
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, true, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) + if err != nil { + return http.Header{}, err + } + + ctx.sanitizeHostForHeader() + ctx.assignAmzQueryValues() + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. 
+// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. 
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
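A small, self-contained sketch of the extraction path (the URL and signature value here are made up):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// A hypothetical presigned URL; only the X-Amz-Signature query value matters here.
	req, err := http.NewRequest("GET", "https://example.amazonaws.com/?X-Amz-Signature=deadbeef", nil)
	if err != nil {
		log.Fatal(err)
	}

	sig, err := v4.GetSignedRequestSignature(req)
	if err != nil {
		log.Fatal(err) // returned when the request carries no signature
	}
	fmt.Printf("%x\n", sig) // prints deadbeef
}
```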
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
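The signature math used by buildStringToSign and buildSignature above is plain HMAC-SHA256 chaining (the key derivation itself appears as deriveSigningKey later in this file). A dependency-free sketch with made-up inputs:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Made-up secret and scope values, for illustration only.
	secret := "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
	dt := time.Date(2021, 3, 1, 0, 0, 0, 0, time.UTC)

	// Key derivation chain, mirroring deriveSigningKey.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(dt.Format("20060102")))
	kRegion := hmacSHA256(kDate, []byte("us-east-1"))
	kService := hmacSHA256(kRegion, []byte("s3"))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	// The string to sign is built as in buildStringToSign; abbreviated here.
	stringToSign := "AWS4-HMAC-SHA256\n20210301T000000Z\n20210301/us-east-1/s3/aws4_request\n<hash of canonical request>"

	fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign))))
}
```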
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func hmacSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func hashSHA256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { + hash := sha256.New() + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 00000000000..98751ee84f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,264 @@ +package aws + +import ( + "io" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. 
+// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. 
+ switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. 
+ _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 00000000000..6192b2455b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 00000000000..0210d2720e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 00000000000..508ca6ae805 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.37.22" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 00000000000..876dcb3fde2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
+var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 00000000000..e83a99886bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 00000000000..0895d53cbe6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 00000000000..0b76999ba1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 00000000000..25ce0fe134d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 00000000000..04345a54c20 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 00000000000..91ba2a59dd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 00000000000..8d462f77e24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 00000000000..3b0ca7afe3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 00000000000..582c024ad15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
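Putting the entry points above together, here is a minimal sketch of parsing an in-memory profile with `ParseBytes` and reading one key back. Since the package lives under `internal/`, the import only compiles from within the `aws-sdk-go` module itself, so treat it as illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	cfg := []byte("[default]\nregion = us-west-2\n")

	sections, err := ini.ParseBytes(cfg)
	if err != nil {
		panic(err)
	}

	if section, ok := sections.GetSection("default"); ok {
		fmt.Println(section.String("region")) // us-west-2
	}
}
```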
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 00000000000..55fa73ebcf2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,357 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
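Note the two-pass shape of `tokenize` above: `countTokens` walks the input once purely to size the token slice, so the second pass can fill it without repeated `append` growth. The same count-then-fill pattern in miniature (the helper is invented):

```go
package main

import "fmt"

// countThenFill sizes its output in a first pass, then fills it in a
// second pass, mirroring how tokenize/countTokens avoid growing the
// token slice with repeated appends.
func countThenFill(rs []rune) []rune {
	count := 0
	for _, r := range rs {
		if r != ' ' {
			count++
		}
	}

	out := make([]rune, count)
	i := 0
	for _, r := range rs {
		if r != ' ' {
			out[i] = r
			i++
		}
	}
	return out
}

func main() {
	fmt.Println(string(countThenFill([]rune("a b c")))) // abc
}
```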
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. 
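`parseTable` is what makes this an LL(1) parser: the next action is a pure lookup on the pair (current AST kind, next token type), with one token of lookahead and no backtracking. The dispatch pattern in miniature, with invented states and inputs:

```go
package main

import "fmt"

type state int
type input int

const (
	start state = iota
	inWord
)

const (
	letter input = iota
	space
)

// A miniature of the parse-table dispatch above: the next state is a
// pure function of (current state, next input).
var table = map[state]map[input]state{
	start:  {letter: inWord, space: start},
	inWord: {letter: inWord, space: start},
}

func main() {
	s := start
	for _, in := range []input{letter, letter, space, letter} {
		s = table[s][in]
	}
	fmt.Println(s == inWord) // true
}
```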
+ if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 00000000000..24df543d38c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 00000000000..e52ac399f17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 00000000000..a45c0bc5662 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + 
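For the integer case above, `newValue` strips the two-character base prefix (`0b`, `0o`, `0x`) and hands the remaining digits to `strconv.ParseInt` with the base that `numberHelper` detected. A quick illustration of that hand-off with invented values:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The parser strips the prefix and passes the remaining digits
	// together with the detected base, as newValue does above.
	for _, v := range []struct {
		digits string
		base   int
	}{
		{"1010", 2}, // from 0b1010
		{"755", 8},  // from 0o755
		{"ff", 16},  // from 0xff
		{"42", 10},
	} {
		n, err := strconv.ParseInt(v.digits, v.base, 64)
		fmt.Println(n, err) // 10, 493, 255, 42, all with nil errors
	}
}
```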
+type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 00000000000..8a84c7cbe08 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 00000000000..45728701931 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError returns nil since there will never be an +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 00000000000..7f01cf7c703 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack contains both the stack of in-progress ASTs (the container) +// and the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 00000000000..f82095ba259 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 00000000000..da7a4049cfa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 00000000000..18f3fe89317 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
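This stack discipline is the parser's whole control flow: `Push`/`Pop` track the statement currently being built, while `MarkComplete` moves finished statements to the output list in order. The same idea in miniature, with invented types:

```go
package main

import "fmt"

// A miniature of ParseStack: push/pop drive the in-progress state while
// markComplete collects finished items in order.
type parseStack struct {
	working   []string
	completed []string
}

func (s *parseStack) push(v string) { s.working = append(s.working, v) }

func (s *parseStack) pop() string {
	v := s.working[len(s.working)-1]
	s.working = s.working[:len(s.working)-1]
	return v
}

func (s *parseStack) markComplete(v string) { s.completed = append(s.completed, v) }

func main() {
	s := parseStack{}
	s.push("start")
	s.push("section [default]")
	s.markComplete(s.pop()) // the section statement is finished
	fmt.Println(s.completed, len(s.working)) // [section [default]] 1
}
```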
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 00000000000..305999d29be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
+func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // tab + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 00000000000..94841c32443 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor returns a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions and stores their values in the current section. +func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements and opens a new section scope for +// completed section statements.
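`removeEscapedCharacters` above rewrites the rune slice in place, collapsing each backslash pair into its literal rune. A simplified sketch of the same idea (the `unescape` helper is invented and handles fewer escapes than the real table):

```go
package main

import "fmt"

// unescape rewrites the rune slice in place, collapsing a backslash
// escape pair into its single literal rune, as removeEscapedCharacters
// does above.
func unescape(b []rune) []rune {
	out := b[:0]
	for i := 0; i < len(b); i++ {
		if b[i] == '\\' && i+1 < len(b) {
			switch b[i+1] {
			case 'n':
				out = append(out, '\n')
			case 't':
				out = append(out, '\t')
			case '\\', '"', '\'':
				out = append(out, b[i+1])
			default:
				out = append(out, b[i], b[i+1])
			}
			i++
			continue
		}
		out = append(out, b[i])
	}
	return out
}

func main() {
	got := string(unescape([]rune(`a\tb\\c`)))
	fmt.Printf("%q\n", got) // "a\tb\\c": a real tab, a single backslash
}
```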
+func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represents +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will return what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() } + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 00000000000..99915f7f777 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using v, the given Visitor. +func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 00000000000..7ffb4ae06ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as any Unicode space other than '\n' or '\r'.
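`DefaultVisitor` is only one implementation of the `Visitor` interface that `Walk` drives. A hedged sketch of a custom visitor that collects section names and ignores everything else (in-package code, not part of this change):

```go
package ini

// sectionNames is a hypothetical Visitor that records the name of every
// completed section and ignores expressions.
type sectionNames struct {
	names []string
}

func (s *sectionNames) VisitExpr(AST) error { return nil }

func (s *sectionNames) VisitStatement(stmt AST) error {
	child := stmt.GetRoot()
	if stmt.Kind == ASTKindCompletedSectionStatement &&
		child.Kind == ASTKindSectionStatement {
		s.names = append(s.names, string(child.Root.Raw()))
	}
	return nil
}
```

Running `Walk(tree, &sectionNames{})` over a parsed tree would then leave the section names in order of appearance.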
+func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go new file mode 100644 index 00000000000..bf18031a38e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go @@ -0,0 +1,50 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. +// +// Supported Access point resource format: +// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} +// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint +// +func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if len(a.Region) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go new file mode 100644 index 00000000000..7a8e46fbdae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go @@ -0,0 +1,74 @@ +package arn + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. +func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if a.Service != "s3" && a.Service != "s3-outposts" { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. 
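A short sketch of driving `ParseAccessPointResource` above by hand. The ARN value follows the documented format with colon separators, the account and name are invented, and the `internal/` import caveat from earlier applies:

```go
package main

import (
	"fmt"
	"strings"

	awsarn "github.com/aws/aws-sdk-go/aws/arn"
	s3arn "github.com/aws/aws-sdk-go/internal/s3shared/arn"
)

func main() {
	a, err := awsarn.Parse("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint")
	if err != nil {
		panic(err)
	}

	// The resource component is "accesspoint/myaccesspoint"; everything
	// after the resource type goes to the parser.
	resParts := strings.Split(a.Resource, "/")
	ap, err := s3arn.ParseAccessPointResource(a, resParts[1:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ap.AccessPointName) // myaccesspoint
}
```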
+func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 00000000000..1e10f8de00b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. 
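The outpost variant above nests two resources inside one ARN, which is why `SplitResource` breaks the resource component on both `/` and `:` before dispatch. A sketch with an invented outpost ID, same `internal/` caveat:

```go
package main

import (
	"fmt"

	awsarn "github.com/aws/aws-sdk-go/aws/arn"
	s3arn "github.com/aws/aws-sdk-go/internal/s3shared/arn"
)

func main() {
	a, err := awsarn.Parse("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint")
	if err != nil {
		panic(err)
	}

	// SplitResource yields ["outpost", "op-...", "accesspoint", "my..."];
	// the parser receives everything after the "outpost" resource type.
	resParts := s3arn.SplitResource(a.Resource)
	outpost, err := s3arn.ParseOutpostARNResource(a, resParts[1:])
	if err != nil {
		panic(err)
	}
	fmt.Println(outpost.GetOutpostID()) // op-1234567890123456
}
```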
+type OutpostAccessPointARN struct {
+	AccessPointARN
+	OutpostID string
+}
+
+// GetOutpostID returns the outpost id of outpost access point arn
+func (o OutpostAccessPointARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// OutpostBucketARN represents the outpost bucket ARN.
+type OutpostBucketARN struct {
+	arn.ARN
+	BucketName string
+	OutpostID  string
+}
+
+// GetOutpostID returns the outpost id of outpost bucket arn
+func (o OutpostBucketARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// GetARN retrieves the base ARN from outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+	return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
+// bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+//
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+	if len(resParts) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	bucketName = strings.TrimSpace(resParts[0])
+	if len(bucketName) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	return bucketName, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go
new file mode 100644
index 00000000000..e756b2f8733
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go
@@ -0,0 +1,189 @@
+package s3shared
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
+)
+
+const (
+	invalidARNErrorErrCode    = "InvalidARNError"
+	configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for Invalid ARN
+type InvalidARNError struct {
+	message  string
+	resource arn.Resource
+	origErr  error
+}
+
+// Error returns the InvalidARNError
+func (e InvalidARNError) Error() string {
+	var extra string
+	if e.resource != nil {
+		extra = "ARN: " + e.resource.String()
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+// Code returns the invalid ARN error code
+func (e InvalidARNError) Code() string {
+	return invalidARNErrorErrCode
+}
+
+// Message returns the message for Invalid ARN error
+func (e InvalidARNError) Message() string {
+	return e.message
+}
+
+// OrigErr is the original error wrapped by Invalid ARN Error
+func (e InvalidARNError) OrigErr() error {
+	return e.origErr
+}
+
+// NewInvalidARNError denotes invalid arn error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "invalid ARN",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints
+func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported with custom client endpoints",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for the target ARN partition",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithFIPSError ARN not supported for FIPS region
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for FIPS region",
+		resource: resource,
+		origErr:  err,
+	}
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+	message           string
+	resource          arn.Resource
+	clientPartitionID string
+	clientRegion      string
+	origErr           error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+	extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+		e.resource, e.clientPartitionID, e.clientRegion)
+
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+// Code returns configuration error's error-code
+func (e ConfigurationError) Code() string {
+	return configurationErrorErrCode
+}
+
+// Message returns the configuration error message
+func (e ConfigurationError) Message() string {
+	return e.message
+}
+
+// OrigErr is the original error wrapped by Configuration Error
+func (e ConfigurationError) OrigErr() error {
+	return e.origErr
+}
+
+// NewClientPartitionMismatchError denotes a mismatch between the client
+// partition and the partition of the provided ARN
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client partition does not match provided ARN partition",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientRegionMismatchError denotes cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client region does not match provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFailedToResolveEndpointError denotes endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "endpoint resolver failed to find an endpoint for the provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for fips but cross-region resource ARN provided",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Dual-stack but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go
new file mode 100644
index 00000000000..9f70a64ecff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go
@@ -0,0 +1,62 @@
+package s3shared
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	awsarn "github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
+)
+
+// ResourceRequest represents the request and arn resource
+type ResourceRequest struct {
+	Resource arn.Resource
+	Request  *request.Request
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return aws.BoolValue(r.Request.Config.S3UseARNRegion)
+}
+
+// UseFIPS returns true if request config region is FIPS
+func (r ResourceRequest) UseFIPS() bool {
+	return IsFIPS(aws.StringValue(r.Request.Config.Region))
+}
+
+// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+	return IsFIPS(r.ARN().Region)
+}
+
+// IsCrossPartition returns true if the client is configured for a different
+// partition than the one the resource ARN region resolves to.
+func (r ResourceRequest) IsCrossPartition() bool {
+	return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+// IsCrossRegion returns true if ARN region is different from the client configured region
+func (r ResourceRequest) IsCrossRegion() bool {
+	return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+// HasCustomEndpoint returns true if custom client endpoint is provided
+func (r ResourceRequest) HasCustomEndpoint() bool {
+	return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+// IsFIPS returns true if region is a fips region
+func IsFIPS(clientRegion string) bool {
+	return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
+}
+
+// IsCrossRegion returns true if request signing region is not same as configured region
+func IsCrossRegion(req *request.Request, otherRegion string) bool {
+	return req.ClientInfo.SigningRegion != otherRegion
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
new file mode 100644
index 00000000000..0b9b0dfce04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 host ID (X-Amz-Id-2) from the response.
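+//
+// A minimal hookup sketch (the handler list shown is illustrative; where this
+// handler is actually registered is decided by the SDK's client setup):
+//
+//	handlers.UnmarshalError.PushBackNamed(RequestFailureWrapperHandler())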
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 00000000000..6c443988bbc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 00000000000..5aa9137e0f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 00000000000..e5f005613b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 00000000000..44898eed0fd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,15 @@
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 00000000000..810ec7f08b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,56 @@
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go version prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
+		bits &= signMask // +-0
+		if e == bias-1 {
+			bits |= uvone // +-1
+		}
+	} else if e < bias+shift {
+		// Round any abs(x) >= 1 containing a fractional component [0,1).
+		//
+		// Numbers with larger exponents are returned unchanged since they
+		// must be either an integer, infinity, or NaN.
+		const half = 1 << (shift - 1)
+		e -= bias
+		bits += half >> e
+		bits &^= fracMask >> e
+	}
+	return math.Float64frombits(bits)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 00000000000..0c9802d8770
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
new file mode 100644
index 00000000000..f4651da2da5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
@@ -0,0 +1,11 @@
+// +build go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read provides the stub for the math/rand Rand.Read method support for Go
+// versions 1.6 and greater.
+func Read(r *rand.Rand, p []byte) (int, error) {
+	return r.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
new file mode 100644
index 00000000000..b1d93a33d48
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
@@ -0,0 +1,24 @@
+// +build !go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read backfills Go 1.6's math/rand Rand.Read for Go 1.5
+func Read(r *rand.Rand, p []byte) (n int, err error) {
+	// Copy of the Go standard library math/rand package's read function, not
+	// added to the standard library until Go 1.6.
+	var pos int8
+	var val int64
+	for n = 0; n < len(p); n++ {
+		if pos == 0 {
+			val = r.Int63()
+			pos = 7
+		}
+		p[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+
+	return n, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 00000000000..38ea61afeaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+	"path"
+	"strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+
+	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+	str := path.Join(elems...)
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 00000000000..7da8a49ce52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000000..ebcbc2b40a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 00000000000..d008ae27cb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..14ad0c58911 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. 
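+//
+// A minimal usage sketch (the key and the fetch function are illustrative):
+//
+//	var g Group
+//	v, err, shared := g.Do("user:42", func() (interface{}, error) {
+//		return fetchUser(42) // runs once even with concurrent callers
+//	})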
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 00000000000..e045f38d837 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 00000000000..151054971a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return 
StringValue(string(v)), err
+	case timestampValueType:
+		v, err := val.(json.Number).Int64()
+		return TimestampValue(timeFromEpochMilli(v)), err
+	case uuidValueType:
+		v, err := base64.StdEncoding.DecodeString(val.(string))
+		var tv UUIDValue
+		copy(tv[:], v)
+		return tv, err
+	default:
+		panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
new file mode 100644
index 00000000000..47433939189
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
@@ -0,0 +1,216 @@
+package eventstream
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// Decoder provides decoding of Event Stream messages.
+type Decoder struct {
+	r      io.Reader
+	logger aws.Logger
+}
+
+// NewDecoder initializes and returns a Decoder for decoding event
+// stream messages from the reader provided.
+func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder {
+	d := &Decoder{
+		r: r,
+	}
+
+	for _, opt := range opts {
+		opt(d)
+	}
+
+	return d
+}
+
+// DecodeWithLogger adds a logger to be used by the decoder when decoding
+// stream events.
+func DecodeWithLogger(logger aws.Logger) func(*Decoder) {
+	return func(d *Decoder) {
+		d.logger = logger
+	}
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// Will return the event stream message, or error if Decode fails to read
+// the message from the stream.
+func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
+	reader := d.r
+	if d.logger != nil {
+		debugMsgBuf := bytes.NewBuffer(nil)
+		reader = io.TeeReader(reader, debugMsgBuf)
+		defer func() {
+			logMessageDecode(d.logger, debugMsgBuf, m, err)
+		}()
+	}
+
+	m, err = Decode(reader, payloadBuf)
+
+	return m, err
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// Will return the event stream message, or error if Decode fails to read
+// the message from the reader.
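+//
+// For example, decoding one message from an in-memory stream (raw is an
+// illustrative byte slice holding a single encoded message):
+//
+//	msg, err := Decode(bytes.NewReader(raw), nil)
+//	if err == nil {
+//		_, _ = msg.Headers, msg.Payload
+//	}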
+func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 00000000000..ffade3bc0c8 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,162 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + logger aws.Logger + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) (err error) { + e.headersBuf.Reset() + + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
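+//
+// A small sketch of encoding a header set into a buffer (the header name and
+// value are illustrative):
+//
+//	var buf bytes.Buffer
+//	hs := Headers{{Name: ":event-type", Value: StringValue("Records")}}
+//	err := EncodeHeaders(&buf, hs)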
+func EncodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 00000000000..5481ef30796 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 00000000000..34c2e89d539 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,77 @@ +package eventstreamapi + +import ( + "fmt" + "sync" +) + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. 
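+//
+// Callers typically select on this channel alongside their own cancellation
+// signal, for example (ctx is an illustrative context):
+//
+//	select {
+//	case <-e.ErrorSet():
+//		return e.Err()
+//	case <-ctx.Done():
+//		return ctx.Err()
+//	}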
+func (e *OnceError) ErrorSet() <-chan struct{} {
+	return e.ch
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
new file mode 100644
index 00000000000..0e4aa42f3e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
@@ -0,0 +1,173 @@
+package eventstreamapi
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Unmarshaler provides the interface for unmarshaling a EventStream
+// message into a SDK type.
+type Unmarshaler interface {
+	UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
+}
+
+// EventReader provides reading from the EventStream of a reader.
+type EventReader struct {
+	decoder *eventstream.Decoder
+
+	unmarshalerForEventType func(string) (Unmarshaler, error)
+	payloadUnmarshaler      protocol.PayloadUnmarshaler
+
+	payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use the ReadEvent method to start reading from the EventStream.
+func NewEventReader(
+	decoder *eventstream.Decoder,
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+	return &EventReader{
+		decoder:                 decoder,
+		payloadUnmarshaler:      payloadUnmarshaler,
+		unmarshalerForEventType: unmarshalerForEventType,
+		payloadBuf:              make([]byte, 10*1024),
+	}
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+	msg, err := r.decoder.Decode(r.payloadBuf)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Reclaim payload buffer for next message read.
+		r.payloadBuf = msg.Payload[0:0]
+	}()
+
+	typ, err := GetHeaderString(msg, MessageTypeHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	switch typ {
+	case EventMessageType:
+		return r.unmarshalEventMessage(msg)
+	case ExceptionMessageType:
+		return nil, r.unmarshalEventException(msg)
+	case ErrorMessageType:
+		return nil, r.unmarshalErrorMessage(msg)
+	default:
+		return nil, &UnknownMessageTypeError{
+			Type: typ, Message: msg.Clone(),
+		}
+	}
+}
+
+// UnknownMessageTypeError provides an error when a message is received from
+// the stream, but the reader is unable to determine what kind of message it is.
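+//
+// Callers can detect this case with a type assertion on the error returned
+// by ReadEvent, for example:
+//
+//	if _, err := r.ReadEvent(); err != nil {
+//		if unk, ok := err.(*UnknownMessageTypeError); ok {
+//			_ = unk.Message // copy of the raw message for inspection
+//		}
+//	}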
+type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. +func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 00000000000..e46b8acc200 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+ + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 00000000000..3a7ba5cd57a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. +func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. 
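+//
+// Callers that need to retain the encoded bytes should copy them first,
+// for example:
+//
+//	p, err := e.Encode(msg)
+//	if err == nil {
+//		kept := append([]byte{}, p...) // detach from the shared buffer
+//		_ = kept
+//	}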
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 00000000000..433bb1630a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. 
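+//
+// A sketch of sending a single event (ctx and event are illustrative; event
+// must implement Marshaler):
+//
+//	if err := w.Send(ctx, event); err != nil {
+//		_ = w.Close()
+//	}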
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 00000000000..10a3823dfa6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,109 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling interface for event types to event stream +// messages. +type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. 
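+//
+// For example (event is an illustrative Marshaler implementation):
+//
+//	if err := w.WriteEvent(event); err != nil {
+//		// the stream should be treated as unusable after a write failure
+//	}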
+func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} + +//type EventEncoder struct { +// encoder Encoder +// ppayloadMarshaler protocol.PayloadMarshaler +// eventTypeFor func(Marshaler) (string, error) +//} +// +//func (e EventEncoder) Encode(event Marshaler) error { +// msg, err := e.marshal(event) +// if err != nil { +// return err +// } +// +// return w.encoder.Encode(msg) +//} +// +//func (e EventEncoder) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// +//func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 00000000000..f6f8c5674ed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
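+//
+// Together with Set and Get this gives map-like access, for example:
+//
+//	var hs Headers
+//	hs.Set(":event-type", StringValue("Records"))
+//	_ = hs.Get(":event-type")
+//	hs.Del(":event-type")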
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 00000000000..9f509d8f6dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,506 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
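+//
+// The 16 raw bytes follow the type byte with no length prefix (see
+// encodeFixedSlice); decode reads back exactly len(v) bytes through
+// decodeFixedBytesValue.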
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 00000000000..f7427da039e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 00000000000..1f1d27aea49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,104 @@ +package protocol + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "net" + "strconv" + "strings" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
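+//
+// As a sketch of the checks below: each dot-separated label must match
+// [a-zA-Z0-9-]{1,63}, a trailing dot (FQDN form) is tolerated, an optional
+// ":port" suffix must fall within [0-65535], and the hostname overall must
+// be 1-255 characters long.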
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + + var hostname string + var port string + var err error + + if strings.Contains(host, ":") { + hostname, port, err = net.SplitHostPort(host) + + if err != nil { + paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) + } + + if !ValidPortNumber(port) { + paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) + } + } else { + hostname = host + } + + labels := strings.Split(hostname, ".") + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(hostname) == 0 { + paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1)) + } + + if len(hostname) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} + +// ValidPortNumber return if the port is valid RFC 3986 port +func ValidPortNumber(port string) bool { + i, err := strconv.Atoi(port) + if err != nil { + return false + } + + if i < 0 || i > 65535 { + return false + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 00000000000..915b0fcafd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
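+//
+// Sketch: with Prefix "{Bucket}." and LabelsFn returning
+// map[string]string{"Bucket": "mybucket"}, a host of
+// "service.region.amazonaws.com" becomes
+// "mybucket.service.region.amazonaws.com". Expansion is skipped entirely
+// when aws.Config.DisableEndpointHostPrefix is set.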
+func (h HostPrefixBuilder) Build(r *request.Request) {
+	if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+		return
+	}
+
+	var labels map[string]string
+	if h.LabelsFn != nil {
+		labels = h.LabelsFn()
+	}
+
+	prefix := h.Prefix
+	for name, value := range labels {
+		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+	}
+
+	r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+	if len(r.HTTPRequest.Host) > 0 {
+		r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 00000000000..53831dff984
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided.
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 00000000000..864fb6704b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,296 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 00000000000..8b2c9bbeba0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,304 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/big" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var millisecondsFloat = new(big.Float).SetInt64(1e3) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. 
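+//
+// Sketch: a response key "statusCode" can populate a member whose
+// locationName is "StatusCode"; when no exact-case key exists, the decoder
+// falls back to a strings.EqualFold scan over the document keys (see
+// unmarshalStruct below).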
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
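+				// This memoizes a case-insensitive hit under the
+				// exact-cased name so the lookup below succeeds; if
+				// several keys collide case-insensitively, the stored
+				// value depends on map iteration order.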
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case json.Number: + switch value.Interface().(type) { + case *int64: + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) + value.Set(reflect.ValueOf(&di)) + case *float64: + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) + case *time.Time: + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000000..a029217e4c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,88 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name
+		req.HTTPRequest.Header.Add("X-Amz-Target", target)
+	}
+
+	// Only set the content type if one is not already specified and a
+	// JSONVersion is specified.
+	if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+		jsonVersion := req.ClientInfo.JSONVersion
+		req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+	}
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	if req.DataFilled() {
+		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+		if err != nil {
+			req.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+				req.HTTPResponse.StatusCode,
+				req.RequestID,
+			)
+		}
+	}
+	return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+	rest.UnmarshalMeta(req)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 00000000000..c0c52e2db0f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,107 @@
+package jsonrpc
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors for
+// both typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the
+// set of exception names mapped to their error unmarshalers.
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions: exceptions,
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+	var buf bytes.Buffer
+	var jsonErr jsonErrorResponse
+	teeReader := io.TeeReader(resp.Body, &buf)
+	err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+	if err != nil {
+		return nil, err
+	}
+	body := ioutil.NopCloser(&buf)
+
+	// Code may be separated by a hash (#), with the last element being the
+	// code used by the SDK.
+	codeParts := strings.SplitN(jsonErr.Code, "#", 2)
+	code := codeParts[len(codeParts)-1]
+	msg := jsonErr.Message
+
+	if fn, ok := u.exceptions[code]; ok {
+		// If the exception code is known, use the associated constructor to
+		// get a value for the exception that the JSON body is unmarshaled into.
+		v := fn(respMeta)
+		err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
+		if err != nil {
+			return nil, err
+		}
+
+		return v, nil
+	}
+
+	// fallback to unmodeled generic exceptions
+	return awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		respMeta.StatusCode,
+		respMeta.RequestID,
+	), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
+// protocol request errors.
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
+func UnmarshalError(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+
+	var jsonErr jsonErrorResponse
+	err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+	if err != nil {
+		req.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			req.HTTPResponse.StatusCode,
+			req.RequestID,
+		)
+		return
+	}
+
+	codes := strings.SplitN(jsonErr.Code, "#", 2)
+	req.Error = awserr.NewRequestFailure(
+		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+		req.HTTPResponse.StatusCode,
+		req.RequestID,
+	)
+}
+
+type jsonErrorResponse struct {
+	Code    string `json:"__type"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 00000000000..776d1101843
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value.
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally base64 decodes the value first, before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
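+//
+// Round-trip sketch using the matching EncodeJSONValue escape mode:
+//
+//	s, _ := EncodeJSONValue(aws.JSONValue{"k": "v"}, Base64Escape)
+//	v, err := DecodeJSONValue(s, Base64Escape) // v["k"] == "v"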
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000000..0ea0647a57d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if the
+// unmarshaling fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if the marshaling
+// fails.
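+//
+// Usage sketch, where handlers is assumed to be a populated build
+// HandlerList (for example one containing the jsonrpc BuildHandler) and
+// shape an SDK input struct:
+//
+//	m := HandlerPayloadMarshal{Marshalers: handlers}
+//	var buf bytes.Buffer
+//	err := m.MarshalPayload(&buf, shape)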
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "PUT"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 00000000000..9d521dcb950 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. +const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 00000000000..d40346a7790 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
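+//
+// The serialized body is the flattened Query form, e.g. (sketch, with a
+// hypothetical operation name and API version)
+//
+//	Action=OperationName&Version=2012-10-17&Param.member.1=value
+//
+// sent as a POST form unless the request is presigned, in which case the
+// parameters are hoisted into the URL query string for a GET.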
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000000..75866d01218 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000000..9231e95d160 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
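+// Currently this is just the request ID, taken from the X-Amzn-Requestid
+// response header.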
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 00000000000..831b0110c54
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,69 @@
+package query
+
+import (
+	"encoding/xml"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+	xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	const svcUnavailableTagName = "ServiceUnavailableException"
+	const errorResponseTagName = "ErrorResponse"
+
+	switch start.Name.Local {
+	case svcUnavailableTagName:
+		e.Code = svcUnavailableTagName
+		e.Message = "service is unavailable"
+		return d.Skip()
+
+	case errorResponseTagName:
+		return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+	default:
+		return fmt.Errorf("unknown error response tag, %v", start)
+	}
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var respErr xmlResponseError
+	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	reqID := respErr.RequestID
+	if len(reqID) == 0 {
+		reqID = r.RequestID
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(respErr.Code, respErr.Message, nil),
+		r.HTTPResponse.StatusCode,
+		reqID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 00000000000..1301b149d35
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,310 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, false)
+		buildBody(r, v)
+	}
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, true)
+		buildBody(r, v)
+	}
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+	query := r.HTTPRequest.URL.Query()
+
+	// Set up the raw path to match the base path pattern. This is needed
+	// so that when the path is mutated a custom escaped version can be
+	// stored in RawPath that will be used by the Go client.
+	r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+	for i := 0; i < v.NumField(); i++ {
+		m := v.Field(i)
+		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			field := v.Type().Field(i)
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+			if kind := m.Kind(); kind == reflect.Ptr {
+				m = m.Elem()
+			} else if kind == reflect.Interface {
+				if !m.Elem().IsValid() {
+					continue
+				}
+			}
+			if !m.IsValid() {
+				continue
+			}
+			if field.Tag.Get("ignore") != "" {
+				continue
+			}
+
+			// Support the ability to customize values to be marshaled as a
+			// blob even though they were modeled as a string. Required for S3
+			// API operations like SSECustomerKey, which is modeled as a string
+			// but required to be base64 encoded in the request.
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000000..4366de2e1e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
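+//
+// A minimal sketch of how these helpers behave (the shape below is
+// hypothetical, not a modeled API type): given
+//
+//	type exampleOutput struct {
+//		_    struct{}  `type:"structure" payload:"Body"`
+//		Body io.Reader `type:"blob"`
+//	}
+//
+// PayloadType(&exampleOutput{}) returns "blob", while PayloadMember returns
+// nil because the payload member is not itself a structure.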
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000000..92f8b4d9a48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,257 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
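+//
+// A minimal usage sketch, assuming a caller-defined shape (the struct and
+// header below are illustrative only, not modeled API types):
+//
+//	type headOutput struct {
+//		_             struct{} `type:"structure"`
+//		ContentLength *int64   `location:"header" locationName:"Content-Length" type:"long"`
+//	}
+//
+//	var out headOutput
+//	// resp is an *http.Response previously received from a REST style API.
+//	if err := rest.UnmarshalResponse(resp, &out, false); err != nil {
+//		// handle decode failure
+//	}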
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {
+	v := reflect.Indirect(reflect.ValueOf(data))
+	return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) error {
+	if field, ok := v.Type().FieldByName("_"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			pfield, _ := v.Type().FieldByName(payloadName)
+			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+				payload := v.FieldByName(payloadName)
+				if payload.IsValid() {
+					switch payload.Interface().(type) {
+					case []byte:
+						defer r.HTTPResponse.Body.Close()
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+						}
+
+						payload.Set(reflect.ValueOf(b))
+
+					case *string:
+						defer r.HTTPResponse.Body.Close()
+						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+						if err != nil {
+							return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+						}
+
+						str := string(b)
+						payload.Set(reflect.ValueOf(&str))
+
+					default:
+						switch payload.Type().String() {
+						case "io.ReadCloser":
+							payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+
+						case "io.ReadSeeker":
+							b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+							if err != nil {
+								return awserr.New(request.ErrCodeSerialization,
+									"failed to read response body", err)
+							}
+							payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+
+						default:
+							io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+							r.HTTPResponse.Body.Close()
+							return awserr.New(request.ErrCodeSerialization,
+								"failed to decode REST response",
+								fmt.Errorf("unknown payload type %s", payload.Type()))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error {
+	for i := 0; i < v.NumField(); i++ {
+		m, field := v.Field(i), v.Type().Field(i)
+		if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+
+			switch field.Tag.Get("location") {
+			case "statusCode":
+				unmarshalStatusCode(m, resp.StatusCode)
+
+			case "header":
+				err := unmarshalHeader(m, resp.Header.Get(name), field.Tag)
+				if err != nil {
+					return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+				}
+
+			case "headers":
+				prefix := field.Tag.Get("locationName")
+				err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps)
+				if err != nil {
+					return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+	if !v.IsValid() {
+		return
+	}
+
+	switch v.Interface().(type) {
+	case *int64:
+		s := int64(statusCode)
+		v.Set(reflect.ValueOf(&s))
+	}
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error {
+	if len(headers) == 0 {
+		return nil
+	}
+	switch r.Interface().(type) {
+	case map[string]*string: // we only support string map value types
+		out := map[string]*string{}
+		for k, v := range headers {
+			if awsStrings.HasPrefixFold(k, prefix) {
+				if normalize {
+					k = strings.ToLower(k)
+				} else {
+					k = http.CanonicalHeaderKey(k)
+				}
+				out[k[len(prefix):]] = &v[0]
+			}
+		}
+		if len(out) != 0 {
+			r.Set(reflect.ValueOf(out))
+		}
+	}
+	return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag
reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 00000000000..2e0e205af37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { + r.HTTPRequest.Header.Set("Content-Type", "application/json") + } + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. 
+func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go new file mode 100644 index 00000000000..d756d8cc529 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -0,0 +1,134 @@ +package restjson + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + errorTypeHeader = "X-Amzn-Errortype" + errorMessageHeader = "X-Amzn-Errormessage" +) + +// UnmarshalTypedError provides unmarshaling errors API response errors +// for both typed and untyped errors. +type UnmarshalTypedError struct { + exceptions map[string]func(protocol.ResponseMetadata) error +} + +// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the +// set of exception names to the error unmarshalers +func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { + return &UnmarshalTypedError{ + exceptions: exceptions, + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + + code := resp.Header.Get(errorTypeHeader) + msg := resp.Header.Get(errorMessageHeader) + + body := resp.Body + if len(code) == 0 { + // If unable to get code from HTTP headers have to parse JSON message + // to determine what kind of exception this will be. + var buf bytes.Buffer + var jsonErr jsonErrorResponse + teeReader := io.TeeReader(resp.Body, &buf) + err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) + if err != nil { + return nil, err + } + + body = ioutil.NopCloser(&buf) + code = jsonErr.Code + msg = jsonErr.Message + } + + // If code has colon separators remove them so can compare against modeled + // exception names. + code = strings.SplitN(code, ":", 2)[0] + + if fn, ok := u.exceptions[code]; ok { + // If exception code is know, use associated constructor to get a value + // for the exception that the JSON body can be unmarshaled into. + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { + return nil, err + } + + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err + } + + return v, nil + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restjson +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol. 
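+//
+// For illustration (the response shown is hypothetical), an error reply such
+// as
+//
+//	HTTP/1.1 400 Bad Request
+//	X-Amzn-Errortype: ValidationException:http://internal.example.com/
+//
+//	{"message": "1 validation error detected"}
+//
+// is surfaced as an awserr.RequestFailure whose code is "ValidationException"
+// (the header value is truncated at the first colon) and whose message comes
+// from the JSON body when no X-Amzn-Errormessage header is present.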
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var jsonErr jsonErrorResponse
+	err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal response error", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	code := r.HTTPResponse.Header.Get(errorTypeHeader)
+	if code == "" {
+		code = jsonErr.Code
+	}
+	msg := r.HTTPResponse.Header.Get(errorMessageHeader)
+	if msg == "" {
+		msg = jsonErr.Message
+	}
+
+	code = strings.SplitN(code, ":", 2)[0]
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
+
+type jsonErrorResponse struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 00000000000..b1ae3648719
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,79 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+	"bytes"
+	"encoding/xml"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+	rest.Build(r)
+
+	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+		var buf bytes.Buffer
+		err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to encode rest XML request", err),
+				0,
+				r.RequestID,
+			)
+			return
+		}
+		r.SetBufferBody(buf.Bytes())
+	}
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		defer r.HTTPResponse.Body.Close()
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to decode REST XML response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	} else {
+		rest.Unmarshal(r)
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+	query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 00000000000..98f4caed91c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,85 @@
+package protocol
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/internal/sdkmath"
+)
+
+// Names of time formats supported by the SDK
+const (
+	RFC822TimeFormatName  = "rfc822"
+	ISO8601TimeFormatName = "iso8601"
+	UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+// Output time is intended to not contain decimals
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
+	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+	// This format is used for output time without seconds precision
+	RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
+	ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+
+	// This format is used for output time with fractional second precision up to milliseconds
+	ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+	switch name {
+	case RFC822TimeFormatName:
+		fallthrough
+	case ISO8601TimeFormatName:
+		fallthrough
+	case UnixTimeFormatName:
+		return true
+	default:
+		return false
+	}
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+	t = t.UTC().Truncate(time.Millisecond)
+
+	switch name {
+	case RFC822TimeFormatName:
+		return t.Format(RFC822OutputTimeFormat)
+	case ISO8601TimeFormatName:
+		return t.Format(ISO8601OutputTimeFormat)
+	case UnixTimeFormatName:
+		ms := t.UnixNano() / int64(time.Millisecond)
+		return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
+	default:
+		panic("unknown timestamp format name, " + name)
+	}
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the time if it was able to be parsed, and fails otherwise.
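+//
+// A small sketch (the input strings are arbitrary examples):
+//
+//	t, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
+//	// t is 2014-04-29 18:30:38 +0000 UTC when err is nil.
+//
+//	t, err = protocol.ParseTime(protocol.UnixTimeFormatName, "1398796238.123")
+//	// fractional seconds are rounded to millisecond precision.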
+func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 00000000000..f614ef898be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,27 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} + +// ResponseMetadata provides the SDK response metadata attributes. +type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 00000000000..cc857f136c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling errors API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. 
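+//
+// Typical wiring is a sketch along these lines (the exceptions map is
+// illustrative; it would be supplied by generated service code):
+//
+//	h := protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptions))
+//	req.Handlers.UnmarshalError.PushBackNamed(h.NamedHandler())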
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 00000000000..09ad951595e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,315 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
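+//
+// As a rough illustration (the shape and tags below are hypothetical), a
+// value such as
+//
+//	type tagging struct {
+//		_   struct{} `locationName:"Tagging" type:"structure"`
+//		Key *string  `locationName:"Key" type:"string"`
+//	}
+//
+// is serialized as <Tagging><Key>...</Key></Tagging>.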
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	var str string
+	switch converted := value.Interface().(type) {
+	case string:
+		str = converted
+	case []byte:
+		if !value.IsNil() {
+			str = base64.StdEncoding.EncodeToString(converted)
+		}
+	case bool:
+		str = strconv.FormatBool(converted)
+	case int64:
+		str = strconv.FormatInt(converted, 10)
+	case int:
+		str = strconv.Itoa(converted)
+	case float64:
+		str = strconv.FormatFloat(converted, 'f', -1, 64)
+	case float32:
+		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+	case time.Time:
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		str = protocol.FormatTime(format, converted)
+	default:
+		return fmt.Errorf("unsupported value for param %s: %v (%s)",
+			tag.Get("locationName"), value.Interface(), value.Type().Name())
+	}
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+		attr := xml.Attr{Name: xname, Value: str}
+		current.Attr = append(current.Attr, attr)
+	} else { // regular text node
+		current.AddChild(&XMLNode{Name: xname, Text: str})
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
new file mode 100644
index 00000000000..c1a511851f6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
@@ -0,0 +1,32 @@
+package xmlutil
+
+import (
+	"encoding/xml"
+	"strings"
+)
+
+type xmlAttrSlice []xml.Attr
+
+func (x xmlAttrSlice) Len() int {
+	return len(x)
+}
+
+func (x xmlAttrSlice) Less(i, j int) bool {
+	spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
+	localI, localJ := x[i].Name.Local, x[j].Name.Local
+	valueI, valueJ := x[i].Value, x[j].Value
+
+	spaceCmp := strings.Compare(spaceI, spaceJ)
+	localCmp := strings.Compare(localI, localJ)
+	valueCmp :=
strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 00000000000..107c053f8ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,299 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			if err := parseMapEntry(r, entry, tag); err != nil {
+				return err
+			}
+		}
+	} else { // this element is itself an entry
+		return parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			if err := parse(valueR, value, ""); err != nil {
+				return err
+			}
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 00000000000..42f71648eee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,159 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
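+//
+// For example (the document is illustrative), decoding
+//
+//	<Person><Name>Jane</Name></Person>
+//
+// produces a root node where Children["Person"][0].Children["Name"][0].Text
+// is "Jane".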
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+	out := &XMLNode{}
+	for {
+		tok, err := d.Token()
+		if err != nil {
+			if err == io.EOF {
+				break
+			} else {
+				return out, err
+			}
+		}
+
+		if tok == nil {
+			break
+		}
+
+		switch typed := tok.(type) {
+		case xml.CharData:
+			out.Text = string(typed.Copy())
+		case xml.StartElement:
+			el := typed.Copy()
+			out.Attr = el.Attr
+			if out.Children == nil {
+				out.Children = map[string][]*XMLNode{}
+			}
+
+			name := typed.Name.Local
+			slice := out.Children[name]
+			if slice == nil {
+				slice = []*XMLNode{}
+			}
+			node, e := XMLToStruct(d, &el)
+			out.findNamespaces()
+			if e != nil {
+				return out, e
+			}
+			node.Name = typed.Name
+			node.findNamespaces()
+			tempOut := *out
+			// Save into a temp variable, simply because out gets squashed during
+			// loop iterations
+			node.parent = &tempOut
+			slice = append(slice, node)
+			out.Children[name] = slice
+		case xml.EndElement:
+			if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+				return out, nil
+			}
+			out = &XMLNode{}
+		}
+	}
+	return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+	ns := map[string]string{}
+	for _, a := range n.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+		}
+	}
+
+	n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+	for node := n; node != nil; node = node.parent {
+		for _, a := range node.Attr {
+			namespace := a.Name.Space
+			if v, ok := node.namespaces[namespace]; ok {
+				namespace = v
+			}
+			if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+				return a.Value, true
+			}
+		}
+	}
+	return "", false
+}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+	// Sort Attributes
+	attrs := node.Attr
+	if sorted {
+		sortedAttrs := make([]xml.Attr, 0, len(attrs))
+		for _, k := range node.Attr {
+			sortedAttrs = append(sortedAttrs, k)
+		}
+		sort.Sort(xmlAttrSlice(sortedAttrs))
+		attrs = sortedAttrs
+	}
+
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
+
+	if node.Text != "" {
+		e.EncodeToken(xml.CharData([]byte(node.Text)))
+	} else if sorted {
+		sortedNames := []string{}
+		for k := range node.Children {
+			sortedNames = append(sortedNames, k)
+		}
+		sort.Strings(sortedNames)
+
+		for _, k := range sortedNames {
+			for _, v := range node.Children[k] {
+				StructToXML(e, v, sorted)
+			}
+		}
+	} else {
+		for _, c := range node.Children {
+			for _, v := range c {
+				StructToXML(e, v, sorted)
+			}
+		}
+	}
+
+	e.EncodeToken(xml.EndElement{Name: node.Name})
+	return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 00000000000..cc1f3dbf52e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,36085 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package s3 + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// operation and ensure that the parts list is empty. +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
+// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMultipartUploadRequest method. 
+// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this operation to complete the upload. Upon receiving this request, +// Amazon S3 concatenates all the parts in ascending order by part number to +// create a new object. In the Complete Multipart Upload request, you must provide +// the parts list. You must ensure that the parts list is complete. This operation +// concatenates the parts that you provide in the list. For each part in the +// list, you must provide the part number and the ETag value, returned after +// that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// CompleteMultipartUpload has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 
400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyObjectRequest method. 
+// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic operation using +// this API. However, to copy an object greater than 5 GB, you must use the +// multipart upload Upload Part - Copy API. For more information, see Copy Object +// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. +// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// operation starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (https://aws.amazon.com/s3/pricing/). +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// Metadata +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). 
+// +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific +// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// +// x-amz-copy-source-if Headers +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: +// +// * x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition evaluates to true +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// Server-side encryption +// +// When you perform a CopyObject operation, you can optionally use the appropriate +// encryption-related headers to encrypt the object using server-side encryption +// with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. With server-side encryption, Amazon S3 encrypts your data +// as it writes it to disks in its data centers and decrypts the data when you +// access it. For more information about server-side encryption, see Using Server-Side +// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the +// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the ACL on the object. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). 
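+//
+// Editorial sketch (not part of the generated SDK): a conditional copy that
+// combines the x-amz-copy-source-if-match header, replaced metadata, and the
+// SSE-KMS encryption described above. The svc client, etag value, and all
+// bucket/key names are assumed placeholders.
+//
+//    out, err := svc.CopyObject(&s3.CopyObjectInput{
+//        Bucket:               aws.String("dest-bucket"),
+//        Key:                  aws.String("dest-key"),
+//        CopySource:           aws.String("source-bucket/source-key"),
+//        CopySourceIfMatch:    aws.String(etag),      // x-amz-copy-source-if-match
+//        MetadataDirective:    aws.String("REPLACE"), // replace, not copy, metadata
+//        Metadata:             map[string]*string{"owner": aws.String("team-a")},
+//        ServerSideEncryption: aws.String("aws:kms"), // SSE-KMS for the target
+//    })
+//    if err == nil {
+//        fmt.Println("copied, ETag:", aws.StringValue(out.CopyObjectResult.ETag))
+//    }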
+// +// Storage Class Options +// +// You can use the CopyObject operation to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. +// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// +// The following operations are related to CopyObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon S3 Glacier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. +// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new S3 bucket. To create a bucket, you must register with Amazon +// S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you +// become the bucket owner. +// +// Not every string is an acceptable bucket name. For information about bucket +// naming restrictions, see Working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// +// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the Europe (Ireland) Region. For more information, see +// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). 
+// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access control list (ACL) overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all AWS Regions except in the North Virginia Region. +// For legacy compatibility, if you re-create an existing bucket that you already +// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the +// bucket access control lists (ACLs). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// This operation initiates a multipart upload and returns an upload ID. This +// upload ID is used to associate all of the parts in the specific multipart +// upload. You specify this upload ID in each of your subsequent upload part +// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. +// +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort operation and Amazon S3 aborts the multipart upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. 
+// There is nothing special about signing multipart upload requests. For more
+// information about signing, see Authenticating Requests (AWS Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or
+// abort the multipart upload. Amazon S3 frees up the space used to store the
+// parts and stops charging you for storing them only after you either complete
+// or abort a multipart upload.
+//
+// You can optionally request server-side encryption. For server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. You can provide your own encryption key,
+// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
+// Amazon S3-managed encryption keys. If you choose to provide your own encryption
+// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the request to initiate the upload
+// by using CreateMultipartUpload.
+//
+// To perform a multipart upload with encryption using an AWS KMS CMK, the requester
+// must have permission to perform the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*,
+// kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions
+// are required because Amazon S3 must decrypt and read data from the encrypted
+// file parts before it completes the multipart upload.
+//
+// If your AWS Identity and Access Management (IAM) user or role is in the same
+// AWS account as the AWS KMS CMK, then you must have these permissions on the
+// key policy. If your IAM user or role belongs to a different account than
+// the key, then you must have the permissions on both the key policy and your
+// IAM user or role.
+//
+// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Permissions
+//
+// When copying an object, you can optionally specify the accounts or groups
+// that should be granted specific permissions on the new object. There are
+// two ways to grant the permissions using the request headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
+// map to the set of permissions that Amazon S3 supports in an ACL. For more
+// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Server-Side Encryption-Specific Request Headers
+//
+// You can optionally tell Amazon S3 to encrypt data at rest using server-side
+// encryption. Server-side encryption is for data encryption at rest. Amazon
+// S3 encrypts your data as it writes it to disks in its data centers and decrypts
+// it when you access it.
The option you use depends on whether you want to +// use AWS managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. For more information about server-side +// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 For more information about +// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the access control list (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. 
+// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. +// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon Simple Storage Service Developer Guide. 
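+//
+// Editorial sketch (not part of the generated SDK): deleting a bucket's cors
+// configuration under a deadline, using the WithContext variant described
+// throughout this file. The svc client and bucket name are assumed
+// placeholders; the caller needs the standard context and time imports.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteBucketCorsWithContext(ctx, &s3.DeleteBucketCorsInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err != nil {
+//        fmt.Println("delete cors failed:", err)
+//    }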
+// +// Related Resources: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// This implementation of the DELETE operation removes default encryption from +// the bucket. 
For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
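+//
+// Editorial sketch (not part of the generated SDK): one way to inject custom
+// configuration through the Request form, here setting a custom header before
+// Send. The client and params values are assumed placeholders.
+//
+//    req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params)
+//    req.HTTPRequest.Header.Set("X-Audit-Tag", "tiering-cleanup") // custom header
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }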
+// +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. +// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. 
The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. 
+// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
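+	// At this point req carries the caller's context and any per-request
+	// options; Send (below) performs the HTTP call and honors cancellation.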
+ return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
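+//
+// A minimal usage sketch (illustrative only; the session setup, bucket name,
+// and configuration ID below are assumed placeholders):
+//
+//    sess := session.Must(session.NewSession())
+//    svc := s3.New(sess)
+//    _, err := svc.DeleteBucketMetricsConfiguration(&s3.DeleteBucketMetricsConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        Id:     aws.String("example-metrics-id"),
+//    })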
+// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" + +// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. 
+// req, resp := client.DeleteBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opDeleteBucketOwnershipControls, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &DeleteBucketOwnershipControlsInput{} + } + + output = &DeleteBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to DeleteBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * PutBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteBucketPolicyRequest method.
+//    req, resp := client.DeleteBucketPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
+	op := &request.Operation{
+		Name:       opDeleteBucketPolicy,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}?policy",
+	}
+
+	if input == nil {
+		input = &DeleteBucketPolicyInput{}
+	}
+
+	output = &DeleteBucketPolicyOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// This implementation of the DELETE operation uses the policy subresource to
+// delete the policy of a specified bucket. If you are using an identity other
+// than the root user of the AWS account that owns the bucket, the calling identity
+// must have the DeleteBucketPolicy permissions on the specified bucket and
+// belong to the bucket owner's account to use this operation.
+//
+// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
+// Access Denied error. If you have the correct permissions, but you're not
+// using an identity that belongs to the bucket owner's account, Amazon S3 returns
+// a 405 Method Not Allowed error.
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operations are related to DeleteBucketPolicy:
+//
+//    * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+//    * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+	req, out := c.DeleteBucketPolicyRequest(input)
+	return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur.
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketReplicationRequest method. +// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 Developer Guide. +// +// The following operations are related to DeleteBucketReplication: +// +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. 
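+//
+// A minimal sketch, assuming an existing *s3.S3 client named svc and a
+// placeholder bucket name:
+//
+//    _, err := svc.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//    })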
+// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration for a bucket. 
Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectRequest method. 
+// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). +// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// You can delete objects by explicitly calling the DELETE Object API or configure +// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny +// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. +// +// The following operation is related to DeleteObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+	req, out := c.DeleteObjectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteObjectTaggingRequest method.
+//    req, resp := client.DeleteObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+	op := &request.Operation{
+		Name:       opDeleteObjectTagging,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}/{Key+}?tagging",
+	}
+
+	if input == nil {
+		input = &DeleteObjectTaggingInput{}
+	}
+
+	output = &DeleteObjectTaggingOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+//    * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+//    * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+	req, out := c.DeleteObjectTaggingRequest(input)
+	return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
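+//
+// For illustration only (assuming the standard library context and time
+// packages, an existing client svc, and request input params), a bounded
+// call might look like:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteObjectTaggingWithContext(ctx, params)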
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectsRequest method. +// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success, or failure, in the response. Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion, the operation does not return any information about +// the delete in the response body. 
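+//
+// A hedged sketch of a quiet-mode batch delete (svc, the bucket, and the keys
+// are assumed placeholders):
+//
+//    _, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{
+//                {Key: aws.String("a.txt")},
+//                {Key: aws.String("b.txt")},
+//            },
+//            Quiet: aws.Bool(true),
+//        },
+//    })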
+// +// When performing this operation on an MFA Delete enabled bucket, that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. +// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
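+//
+// For example (a sketch only; svc and params are assumed), extra HTTP-body
+// logging could be enabled for a single call via a request.Option:
+//
+//    _, err := svc.DeletePublicAccessBlockWithContext(aws.BackgroundContext(), params,
+//        request.WithLogLevel(aws.LogDebugWithHTTPBody))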
+// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. +// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the GET operation uses the accelerate subresource +// to return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. 
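+//
+// A minimal sketch of reading the state (svc and the bucket name are assumed):
+//
+//    out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil && out.Status != nil {
+//        fmt.Println(*out.Status) // "Enabled" or "Suspended"; nil if never set
+//    }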
+// +// Related Resources +// +// * PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. 
If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. +// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. 
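+//
+// A minimal lookup sketch (an editor's illustration, not generated API text);
+// an *s3.S3 client named svc, the bucket name, and the analytics configuration
+// ID "report-1" are all assumptions:
+//
+//    out, err := svc.GetBucketAnalyticsConfiguration(&s3.GetBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//        Id:     aws.String("report-1"),      // assumed analytics configuration ID
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(out.AnalyticsConfiguration)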
+// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. 
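+//
+// For instance (a hedged sketch by the editor, not generated API text), the
+// returned Request can be customized before Send; the client value svc and
+// the header name below are made up for illustration:
+//
+//    req, out := svc.GetBucketCorsRequest(&s3.GetBucketCorsInput{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//    })
+//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // hypothetical header
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out) // out is populated only after a successful Send
+//    }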
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. 
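+//
+// As one hedged example of such request-level customization (an editor's
+// sketch, not generated API text; svc and the bucket name are assumptions),
+// a handler can be pushed onto the request before sending it:
+//
+//    req, out := svc.GetBucketEncryptionRequest(&s3.GetBucketEncryptionInput{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//    })
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("about to call", r.Operation.Name) // simple tracing hook
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out.ServerSideEncryptionConfiguration)
+//    }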
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the default encryption configuration for an Amazon S3 bucket. If +// the bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. +// +// For information about the Amazon S3 default encryption feature, see Amazon +// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
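+//
+// A minimal retrieval sketch (an editor's illustration, not generated API
+// text); svc, the bucket name, and the configuration ID "tier-1" are assumptions:
+//
+//    out, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//        Id:     aws.String("tier-1"),        // assumed intelligent-tiering configuration ID
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(out.IntelligentTieringConfiguration)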
+// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
+// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleRequest method. +// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. 
HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
+//    req, resp := client.GetBucketLifecycleConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketLifecycleConfiguration,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?lifecycle",
+	}
+
+	if input == nil {
+		input = &GetBucketLifecycleConfigurationInput{}
+	}
+
+	output = &GetBucketLifecycleConfigurationOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The response describes
+// the new filter element that you can use to specify a filter to select a subset
+// of objects to which the rule applies. If you are using a previous version
+// of the lifecycle configuration, it still works. For the earlier API description,
+// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+//    * Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+//    configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+//    Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycleConfiguration:
+//
+//    * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
+//
+//    * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
+//
+//    * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
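+//
+// A hedged usage sketch (an editor's addition, not generated API text) that
+// distinguishes the special error above; svc and the bucket name are assumptions:
+//
+//    out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
+//        fmt.Println("no lifecycle configuration on this bucket")
+//    } else if err != nil {
+//        log.Fatal(err)
+//    } else {
+//        fmt.Println(out.Rules)
+//    }
+//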
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// To use this implementation of the operation, you must be the bucket owner. +// +// The following operations are related to GetBucketLocation: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// The following operations are related to GetBucketLogging: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
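+//
+// A short sketch of reading the logging status (an editor's illustration,
+// not generated API text); svc and the bucket name are assumptions:
+//
+//    out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    if out.LoggingEnabled == nil {
+//        fmt.Println("server access logging is disabled")
+//    } else {
+//        fmt.Println("logs delivered to", aws.StringValue(out.LoggingEnabled.TargetBucket))
+//    }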
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. +// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. 
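+//
+// A minimal call sketch (an editor's addition, not generated API text); note
+// that the input type is named GetBucketNotificationConfigurationRequest;
+// svc and the bucket name are assumptions:
+//
+//    cfg, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
+//        Bucket: aws.String("examplebucket"), // assumed bucket name
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(len(cfg.TopicConfigurations), "SNS topic configurations")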
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketOwnershipControls = "GetBucketOwnershipControls" + +// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketOwnershipControlsRequest method. +// req, resp := client.GetBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opGetBucketOwnershipControls, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &GetBucketOwnershipControlsInput{} + } + + output = &GetBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to GetBucketOwnershipControls: +// +// * PutBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. 
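+//
+// For instance, a deadline and per-request options can be attached like this
+// (a sketch; the bucket name and timeout are placeholders):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := client.GetBucketOwnershipControlsWithContext(ctx, &s3.GetBucketOwnershipControlsInput{
+//        Bucket: aws.String("examplebucket"), // placeholder
+//    }, request.WithLogLevel(aws.LogDebug))
+//    if err == nil {
+//        fmt.Println(out.OwnershipControls)
+//    }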
+// +// See GetBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
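+//
+// A brief sketch of that type-assertion pattern (the bucket name is a
+// placeholder):
+//
+//    out, err := client.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//        Bucket: aws.String("examplebucket"), // placeholder
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message()) // e.g. to tell the 403 and 405 cases above apart
+//    } else if err == nil {
+//        fmt.Println(aws.StringValue(out.Policy))
+//    }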
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
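+//
+// As a sketch (the bucket name is a placeholder), the returned status can be
+// inspected like this:
+//
+//    out, err := client.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
+//        Bucket: aws.String("examplebucket"), // placeholder
+//    })
+//    if err == nil && out.PolicyStatus != nil {
+//        fmt.Println("public:", aws.BoolValue(out.PolicyStatus.IsPublic))
+//    }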
+// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+// req, resp := client.GetBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ output = &GetBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while for a put or delete of a replication configuration
+// to propagate to all Amazon S3 systems. Therefore, a get request made soon
+// after a put or delete can return an outdated result.
+//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// This operation requires permissions for the s3:GetReplicationConfiguration
+// action. For more information about permissions, see Using Bucket Policies
+// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// If you include the Filter element in a replication configuration, you must
+// also include the DeleteMarkerReplication and Priority elements. The response
+// also returns those elements.
+//
+// For information about GetBucketReplication errors, see List of replication-related
+// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList).
+//
+// The following operations are related to GetBucketReplication:
+//
+// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
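+//
+// A sketch of handling the special error noted above (the bucket name is a
+// placeholder, and the exact error code string is an assumption based on the
+// documentation for this operation):
+//
+//    out, err := client.GetBucketTagging(&s3.GetBucketTaggingInput{
+//        Bucket: aws.String("examplebucket"), // placeholder
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchTagSet" {
+//        fmt.Println("no tags on bucket") // assumed error code; see the note above
+//    } else if err == nil {
+//        fmt.Println(out.TagSet)
+//    }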
+func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketWebsiteRequest method.
+// req, resp := client.GetBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ output = &GetBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET operation requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system. You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. 
For
+// more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+//
+// To distribute large files to many people, you can save bandwidth costs by
+// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
+// For more information about returning the ACL of an object, see GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
+//
+// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
+// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering
+// Deep Archive tiers, before you can retrieve the object you must first restore
+// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this operation returns an InvalidObjectStateError error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you GET the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
+// action), the response also returns the x-amz-tagging-count header that provides
+// the number of tags associated with the object. You can use GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+// to retrieve the tag set associated with an object.
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will
+// return an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 will return
+// an HTTP status code 403 ("access denied") error.
+//
+// Versioning
+//
+// By default, the GET operation returns the current version of an object. To
+// return a different version, use the versionId subresource.
+//
+// If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in the
+// response.
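+//
+// A sketch of requesting a specific version (bucket, key, and version ID are
+// placeholders):
+//
+//    out, err := client.GetObject(&s3.GetObjectInput{
+//        Bucket:    aws.String("examplebucket"), // placeholder
+//        Key:       aws.String("photos/2006/February/sample.jpg"),
+//        VersionId: aws.String("<version-id>"), // placeholder version ID
+//    })
+//    if err == nil {
+//        defer out.Body.Close() // Body is a ReadCloser; always close it
+//    }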
+// +// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). +// +// Overriding Response Header Values +// +// There are times when you want to override certain response header values +// in a GET response. For example, you might override the Content-Disposition +// response header value in your GET request. +// +// You can override values for a set of response headers using the following +// query parameters. These response header values are sent only on a successful +// request, that is, when status code 200 OK is returned. The set of headers +// you can override using these parameters is a subset of the headers that Amazon +// S3 accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values +// in the GET response, you use the following request parameters. +// +// You must sign the request, either using an Authorization header or a presigned +// URL, when using these parameters. They cannot be used with an unsigned (anonymous) +// request. +// +// * response-content-type +// +// * response-content-language +// +// * response-expires +// +// * response-cache-control +// +// * response-content-disposition +// +// * response-content-encoding +// +// Additional Considerations about Request Headers +// +// If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// +// If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since +// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// The following operations are related to GetObject: +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// * ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLegalHoldRequest method. +// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
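+//
+// A minimal sketch (bucket and key are placeholders):
+//
+//    out, err := client.GetObjectLegalHoldWithContext(ctx, &s3.GetObjectLegalHoldInput{
+//        Bucket: aws.String("examplebucket"), // placeholder
+//        Key:    aws.String("contract.pdf"),  // placeholder
+//    })
+//    if err == nil && out.LegalHold != nil {
+//        fmt.Println(aws.StringValue(out.LegalHold.Status)) // "ON" or "OFF"
+//    }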
+func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
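+//
+// A brief sketch (the bucket name is a placeholder):
+//
+//    out, err := client.GetObjectLockConfigurationWithContext(ctx, &s3.GetObjectLockConfigurationInput{
+//        Bucket: aws.String("examplebucket"), // placeholder
+//    })
+//    if err == nil && out.ObjectLockConfiguration != nil {
+//        fmt.Println(aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled))
+//    }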
+func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRetentionRequest method. +// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send()
+}
+
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectTagging for more information on using the GetObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectTaggingRequest method.
+// req, resp := client.GetObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &GetObjectTaggingInput{}
+ }
+
+ output = &GetObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the s3:GetObjectTagging
+// action. By default, the GET operation returns information about the current version
+// of an object. For a versioned bucket, you can have multiple versions of an
+// object in your bucket. To retrieve tags of any other version, use the versionId
+// query parameter. You also need permission for the s3:GetObjectVersionTagging
+// action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// The following operations are related to GetObjectTagging:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
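+//
+// For example (a sketch; bucket, key, and version ID are placeholders), tags
+// of a specific version can be read with the versionId parameter mentioned
+// above:
+//
+//    out, err := client.GetObjectTaggingWithContext(ctx, &s3.GetObjectTaggingInput{
+//        Bucket:    aws.String("examplebucket"), // placeholder
+//        Key:       aws.String("sample.jpg"),    // placeholder
+//        VersionId: aws.String("<version-id>"),  // placeholder
+//    })
+//    if err == nil {
+//        fmt.Println(out.TagSet)
+//    }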
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following operation is related to GetObjectTorrent: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. 
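+//
+// For illustration, a minimal sketch of reading a bucket's configuration,
+// assuming an existing *s3.S3 client named svc and a hypothetical bucket name:
+//
+//    out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.PublicAccessBlockConfiguration)
+//    }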
+// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A +// message body is not included, so you cannot determine the exception beyond +// these error codes. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns +// a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve +// the exact exception beyond these error codes. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// * Encryption request headers, like x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon +// S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. +// +// * The last modified property in this case is the creation date of the +// object. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. 
+// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. +// +// The following operation is related to HeadObject: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. +// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated +// is set to false. If there are more configurations to list, IsTruncated is +// set to true, and there will be a value in NextContinuationToken. You use +// the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. +// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. 
S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. +// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &ListBucketInventoryConfigurationsInput{} + } + + output = &ListBucketInventoryConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. You can have up +// to 1,000 analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. +// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. 
Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
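+//
+// For illustration, a minimal sketch of the construct-then-send flow this
+// pattern enables, assuming an existing *s3.S3 client named svc (the custom
+// header name is hypothetical):
+//
+//    req, out := svc.ListBucketsRequest(&s3.ListBucketsInput{})
+//    req.HTTPRequest.Header.Set("X-Custom-Header", "value") // inject custom logic before sending
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out.Buckets)
+//    }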
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
+//    req, resp := client.ListMultipartUploadsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+	op := &request.Operation{
+		Name:       opListMultipartUploads,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?uploads",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"KeyMarker", "UploadIdMarker"},
+			OutputTokens:    []string{"NextKeyMarker", "NextUploadIdMarker"},
+			LimitToken:      "MaxUploads",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListMultipartUploadsInput{}
+	}
+
+	output = &ListMultipartUploadsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This operation lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted.
+//
+// This operation returns at most 1,000 multipart uploads in the response. 1,000
+// multipart uploads is the maximum number of uploads a response can include,
+// which is also the default value. You can further limit the number of uploads
+// in a response by specifying the max-uploads parameter in the request. If
+// additional multipart uploads satisfy the list criteria, the response will
+// contain an IsTruncated element with the value true. To list the additional
+// multipart uploads, use the key-marker and upload-id-marker request parameters.
+//
+// In the response, the uploads are sorted by key. If your application has initiated
+// more than one multipart upload using the same object key, then uploads in
+// the response are first sorted by key. Additionally, uploads are sorted in
+// ascending order within each key by the upload initiation time.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListMultipartUploads:
+//
+//    * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+//    * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+//    * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+//    * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+//    * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
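+//
+// For illustration, a minimal sketch with a cancellable context, assuming an
+// existing *s3.S3 client named svc and a hypothetical bucket name:
+//
+//    ctx, cancel := context.WithCancel(context.Background())
+//    defer cancel()
+//    out, err := svc.ListObjectVersionsWithContext(ctx, &s3.ListObjectVersionsInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Versions)
+//    }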
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsRequest method. 
+// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This API has been revised. We recommend that you use the newer version, ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. 
Make sure +// to design your application to parse the contents of the response and handle +// it appropriately. Objects are returned sorted in an ascending order of the +// respective key names in the list. +// +// To use this operation, you must have READ access to the bucket. +// +// To use this operation in an AWS Identity and Access Management (IAM) policy, +// you must have permissions to perform the s3:ListBucket action. The bucket +// owner has this permission by default and can grant this permission to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// This section describes the latest revision of the API. We recommend that +// you use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// +// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// The following operations are related to ListObjectsV2: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
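+//
+// As an illustrative sketch that complements the bounded example below (the
+// bucket name is a placeholder), a caller could collect every key across pages:
+//
+//    var keys []string
+//    err := client.ListObjectsV2Pages(&s3.ListObjectsV2Input{Bucket: aws.String("my-bucket")},
+//        func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//            for _, obj := range page.Contents {
+//                keys = append(keys, *obj.Key)
+//            }
+//            return true // keep iterating until the final page
+//        })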
+// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). +// This request returns a maximum of 1,000 uploaded parts. The default number +// of parts returned is 1,000 parts. 
You can restrict the number of parts returned +// by specifying the max-parts request parameter. If your multipart upload consists +// of more than 1,000 parts, the response returns an IsTruncated field with +// the value of true, and a NextPartNumberMarker element. In subsequent ListParts +// requests you can include the part-number-marker query string parameter and +// set its value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. 
+// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. 
The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// operation returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use the x-amz-acl header to +// set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. 
For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-write
+// header grants create, overwrite, and delete objects permission to LogDelivery
+// group predefined by Amazon S3 and two AWS accounts identified by their
+// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+	req, out := c.PutBucketAclRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
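+//
+// For illustration only (the bucket name and ACL value are placeholders, and
+// ctx is assumed to be an existing context), a canned ACL can be applied as:
+//
+//    _, err := client.PutBucketAclWithContext(ctx, &s3.PutBucketAclInput{
+//        Bucket: aws.String("my-bucket"),
+//        ACL:    aws.String("private"),
+//    })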
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. 
For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. 
+// +// Related Resources +// +// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. 
+// +// This operation uses the encryption subresource to configure default encryption +// and Amazon S3 Bucket Key for an existing bucket. +// +// Default encryption for a bucket can use server-side encryption with Amazon +// S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you +// specify default encryption using SSE-KMS, you can also configure Amazon S3 +// Bucket Key. For information about default encryption, see Amazon S3 default +// bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. For more information +// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires AWS Signature Version 4. For more information, see +// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
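+	// Sending with the caller's context means cancellation or deadline expiry
+	// surfaces as an error returned from Send.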
+ return out, req.Send() +} + +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You +// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
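+//
+// As a minimal sketch (the bucket name and configuration ID are placeholders),
+// a configuration that archives objects after 90 days without access might
+// look like:
+//
+//    _, err := client.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("archive-after-90-days"),
+//        IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
+//            Id:     aws.String("archive-after-90-days"),
+//            Status: aws.String("Enabled"),
+//            Tierings: []*s3.Tiering{{
+//                AccessTier: aws.String("ARCHIVE_ACCESS"),
+//                Days:       aws.Int64(90),
+//            }},
+//        },
+//    })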
+// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration +// bucket permission to set the configuration on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the PUT operation adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same AWS Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. 
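+//
+// As a rough sketch (bucket names, the configuration ID, and the destination
+// ARN are placeholders), a weekly CSV inventory of current object versions
+// might be configured as:
+//
+//    _, err := client.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//        Bucket: aws.String("source-bucket"),
+//        Id:     aws.String("weekly-inventory"),
+//        InventoryConfiguration: &s3.InventoryConfiguration{
+//            Id:                     aws.String("weekly-inventory"),
+//            IsEnabled:              aws.Bool(true),
+//            IncludedObjectVersions: aws.String("Current"),
+//            Schedule:               &s3.InventorySchedule{Frequency: aws.String("Weekly")},
+//            Destination: &s3.InventoryDestination{
+//                S3BucketDestination: &s3.InventoryS3BucketDestination{
+//                    Bucket: aws.String("arn:aws:s3:::destination-bucket"),
+//                    Format: aws.String("CSV"),
+//                },
+//            },
+//        },
+//    })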
+// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleRequest method. 
+// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). 
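+//
+// As a hedged, minimal sketch of the updated API that this deprecated
+// operation points to (the bucket name, rule ID, and prefix below are
+// hypothetical), a rule that expires objects under "logs/" after 30 days
+// could look like:
+//
+//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//            Rules: []*s3.LifecycleRule{{
+//                ID:         aws.String("expire-logs"),
+//                Status:     aws.String("Enabled"),
+//                Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//            }},
+//        },
+//    })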
+//
+// Related Resources
+//
+// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) (Deprecated)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+//
+// By default, a resource owner (in this case, the bucket owner, which is
+// the AWS account that created the bucket) can perform any of these operations.
+// A resource owner can also grant others permission to perform the operation.
+// For more information, see the following topics in the Amazon Simple Storage
+// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+//
+// Deprecated: PutBucketLifecycle has been deprecated
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+//
+// Deprecated: PutBucketLifecycleWithContext has been deprecated
+func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ output = &PutBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The previous version
+// of the API supported filtering based only on an object key name prefix, which
+// is supported for backward compatibility. For the related API description,
+// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
+//
+// Rules
+//
+// You specify the lifecycle configuration in your request body. The lifecycle
+// configuration is specified as XML consisting of one or more rules. Each rule
+// consists of the following:
+//
+// * Filter identifying a subset of objects to which the rule applies. The
+// filter can be based on a key name prefix, object tags, or a combination
+// of both.
+//
+// * Status indicating whether the rule is in effect.
+//
+// * One or more lifecycle transition and expiration actions that you want
+// Amazon S3 to perform on the objects identified by the filter. If the state
+// of your bucket is versioning-enabled or versioning-suspended, you can
+// have many versions of the same object (one current version and zero or
+// more noncurrent versions). Amazon S3 provides predefined actions that
+// you can specify for current and noncurrent object versions.
+//
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
+//
+// Permissions
+//
+// By default, all Amazon S3 resources are private, including buckets, objects,
+// and related subresources (for example, lifecycle configuration and website
+// configuration). Only the resource owner (that is, the AWS account that created
+// it) can access the resource. The resource owner can optionally grant access
+// permissions to others by writing an access policy. For this operation, a
+// user must get the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions.
Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLoggingRequest method. 
+// req, resp := client.PutBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ output = &PutBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify the logging parameters. All logs are saved to buckets
+// in the same AWS Region as the source bucket. To set the logging status of
+// a bucket, you must be the bucket owner.
+//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use
+// the Grantee request element to grant access to other people. The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: the Grantee element identifies the grantee by the
+// canonical user ID. DisplayName is optional and ignored in the request.
+//
+// * By Email address: the Grantee element identifies the grantee by email
+// address (for example, Grantees@email.com). The grantee is resolved to
+// the CanonicalUser and, in a response to a GET Object acl request, appears
+// as the CanonicalUser.
+//
+// * By URI: the Grantee element identifies a predefined group by URI, for
+// example http://acs.amazonaws.com/groups/global/AuthenticatedUsers.
+//
+// To enable logging, you use LoggingEnabled and its children request elements.
+// To disable logging, you use an empty BucketLoggingStatus request element.
+//
+// For more information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html).
+//
+// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+// For more information about returning the logging status of a bucket, see
+// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
+//
+// The following operations are related to PutBucketLogging:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
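+//
+// As an illustrative sketch (the bucket names and prefix below are
+// hypothetical), enabling access logging to a separate target bucket
+// might look like:
+//
+//    _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        BucketLoggingStatus: &s3.BucketLoggingStatus{
+//            LoggingEnabled: &s3.LoggingEnabled{
+//                TargetBucket: aws.String("my-log-bucket"),
+//                TargetPrefix: aws.String("my-bucket-logs/"),
+//            },
+//        },
+//    })
+//
+//    // Sending an empty BucketLoggingStatus disables logging again:
+//    // BucketLoggingStatus: &s3.BucketLoggingStatus{}
+//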
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to PutBucketMetricsConfiguration:
+//
+// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// PutBucketMetricsConfiguration has the following special error:
+//
+// * Error code: TooManyConfigurations Description: You are attempting to
+// create a new configuration but have already reached the 1,000-configuration
+// limit. HTTP Status Code: HTTP 400 Bad Request
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketMetricsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotification operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketNotification for more information on using the PutBucketNotification
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
+// req, resp := client.PutBucketNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketNotificationConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationConfigurationInput{}
+ }
+
+ output = &PutBucketNotificationConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an
+// event notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration.
+//
+// This operation replaces the existing notification configuration with the
+// configuration you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon
+// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
+// SQS) destination exists, and that the bucket owner has permission to publish
+// to it by sending a test notification. In the case of AWS Lambda destinations,
+// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
+// to invoke the function from the Amazon S3 bucket. For more information, see
+// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with s3:PutBucketNotification permission.
+//
+// The PUT notification is an atomic operation. For example, suppose your notification
+// configuration includes SNS topic, SQS queue, and Lambda function configurations.
+// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. +// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following operation is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketOwnershipControls = "PutBucketOwnershipControls" + +// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketOwnershipControlsRequest method. 
+// req, resp := client.PutBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opPutBucketOwnershipControls, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &PutBucketOwnershipControlsInput{} + } + + output = &PutBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For +// more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to PutBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. 
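+//
+// As an illustrative sketch (the bucket name and policy statement below are
+// hypothetical), applying a policy with a context might look like:
+//
+//    policy := `{
+//      "Version": "2012-10-17",
+//      "Statement": [{
+//        "Sid": "DenyUnencryptedUploads",
+//        "Effect": "Deny",
+//        "Principal": "*",
+//        "Action": "s3:PutObject",
+//        "Resource": "arn:aws:s3:::my-bucket/*",
+//        "Condition": {"StringNotEquals": {"s3:x-amz-server-side-encryption": "AES256"}}
+//      }]
+//    }`
+//    _, err := svc.PutBucketPolicyWithContext(ctx, &s3.PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//        Policy: aws.String(policy),
+//    })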
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 Developer Guide. +// +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets +// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 +// can assume to replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. 
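+//
+// As a minimal hedged sketch (the bucket names, role ARN, and priority below
+// are hypothetical; versioning must already be enabled on both buckets), a
+// configuration that replicates all new objects to one destination could
+// look like:
+//
+//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("my-source-bucket"),
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/my-replication-role"),
+//            Rules: []*s3.ReplicationRule{{
+//                Priority: aws.Int64(1),
+//                Status:   aws.String("Enabled"),
+//                Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("")},
+//                DeleteMarkerReplication: &s3.DeleteMarkerReplication{
+//                    Status: aws.String("Disabled"),
+//                },
+//                Destination: &s3.Destination{
+//                    Bucket: aws.String("arn:aws:s3:::my-destination-bucket"),
+//                },
+//            }},
+//        },
+//    })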
+// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. 
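+//
+// As an illustrative sketch (the bucket name below is hypothetical), enabling
+// Requester Pays might look like:
+//
+//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//        Bucket: aws.String("my-bucket"),
+//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//            Payer: aws.String(s3.PayerRequester),
+//        },
+//    })
+//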
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. 
For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. +// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). 
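+//
+// A minimal sketch of enabling versioning (the bucket name is an illustrative
+// assumption; include the MFA field only if MFA Delete is configured):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String(s3.BucketVersioningStatusEnabled),
+//            // use s3.BucketVersioningStatusSuspended to suspend versioning
+//        },
+//    })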
+// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. 
+// +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. +// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// The Content-MD5 header is required for any request to upload an object with +// a retention period configured using Amazon S3 Object Lock. For more information +// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side Encryption +// +// You can optionally request server-side encryption. With server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts the data when you access it. You have the option to provide +// your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// If you request server-side encryption using AWS Key Management Service (SSE-KMS), +// you can enable an S3 Bucket Key at the object-level. 
For more information, +// see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Access Control List (ACL)-Specific Request Headers +// +// You can use headers to grant ACL- based permissions. By default, all objects +// are private. Only the owner has full access control. When adding a new object, +// you can grant permissions to individual AWS accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the +// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the STANDARD Storage Class to store newly created +// objects. The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different Storage Class. +// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, +// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// Uses the acl subresource to set the access control list (ACL) permissions +// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission +// to set the ACL of an object. For more information, see What permissions can +// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon Simple Storage Service Developer Guide. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 Developer Guide. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. 
If you
+// use these ACL-specific headers, you cannot use the x-amz-acl header to set
+// a canned ACL. These parameters map to the set of permissions that Amazon
+// S3 supports in an ACL. For more information, see Access Control List (ACL)
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account; uri – if you are granting permissions to a predefined
+// group; emailAddress – if the value specified is the email address of
+// an AWS account. Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia), US West (N. California),
+// US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific
+// (Tokyo), Europe (Ireland), and South America (São Paulo). For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants list objects permission to the two AWS accounts identified
+// by their email addresses: x-amz-grant-read: emailAddress="xyz@amazon.com",
+// emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xsi:type="CanonicalUser"><ID>ID</ID>
+// <DisplayName>GranteesEmail</DisplayName></Grantee> DisplayName is optional
+// and ignored in the request.
+//
+// * By URI: <Grantee xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address: <Grantee xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific
+// (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland),
+// and South America (São Paulo). For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Versioning
+//
+// The ACL of an object is set at the object version level. By default, PUT
+// sets the ACL of the current version of an object. To set the ACL of a different
+// version, use the versionId subresource.
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
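+//
+// A minimal sketch using a canned ACL (the bucket and key names are
+// illustrative assumptions; remember that a canned ACL cannot be combined
+// with the x-amz-grant-* headers):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        Key:    aws.String("example-object"), // assumption
+//        ACL:    aws.String(s3.ObjectCannedACLPublicRead),
+//    })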
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLegalHoldRequest method. +// req, resp := client.PutObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opPutObjectLegalHold, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &PutObjectLegalHoldInput{} + } + + output = &PutObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Applies a Legal Hold configuration to the specified object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLegalHold for usage and error information. 
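+//
+// A minimal sketch of placing a legal hold (bucket and key are illustrative
+// assumptions; the bucket must have been created with Object Lock enabled):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        Key:    aws.String("example-object"), // assumption
+//        LegalHold: &s3.ObjectLockLegalHold{
+//            Status: aws.String(s3.ObjectLockLegalHoldStatusOn), // ...Off removes the hold
+//        },
+//    })
+//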
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() +} + +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. +// req, resp := client.PutObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &PutObjectLockConfigurationInput{} + } + + output = &PutObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. +// +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
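+//
+// A minimal sketch of a default retention rule (the bucket name is an
+// illustrative assumption and must refer to a bucket created with Object
+// Lock enabled):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//            ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
+//            Rule: &s3.ObjectLockRule{
+//                DefaultRetention: &s3.DefaultRetention{
+//                    Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
+//                    Days: aws.Int64(30), // Days or Years, never both
+//                },
+//            },
+//        },
+//    })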
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectRetention = "PutObjectRetention" + +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &PutObjectRetentionInput{} + } + + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// Places an Object Retention configuration on an object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
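+//
+// A minimal sketch of setting a retention period on one object (the bucket,
+// key, and 30-day window are illustrative assumptions; assumes the time
+// package is imported):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        Key:    aws.String("example-object"), // assumption
+//        Retention: &s3.ObjectLockRetention{
+//            Mode:            aws.String(s3.ObjectLockRetentionModeCompliance),
+//            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//        },
+//    })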
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). 
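+//
+// A minimal sketch of replacing an object's tag set (bucket, key, and tag
+// values are illustrative assumptions; add VersionId to tag a specific
+// version, which also requires s3:PutObjectVersionTagging):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        Key:    aws.String("example-object"), // assumption
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{
+//                {Key: aws.String("project"), Value: aws.String("example")},
+//            },
+//        },
+//    })
+//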
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. 
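+//
+// A minimal sketch of the context-aware variant (the bucket name and the
+// 10-second timeout are illustrative assumptions; assumes the context and
+// time packages are imported):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutPublicAccessBlockWithContext(ctx, &s3.PutPublicAccessBlockInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//            BlockPublicAcls:       aws.Bool(true),
+//            BlockPublicPolicy:     aws.Bool(true),
+//            IgnorePublicAcls:      aws.Bool(true),
+//            RestrictPublicBuckets: aws.Bool(true),
+//        },
+//    })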
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// This action is not supported by Amazon S3 on Outposts. +// +// This action performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. 
For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. For more information +// about the S3 structure in the request body, see the following: PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing +// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. 
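+//
+// Putting the select pieces above together, a hedged sketch of a SELECT-type
+// restore request (the bucket names, key, prefix, and query are illustrative
+// assumptions):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("example-bucket"),  // assumption
+//        Key:    aws.String("archived.csv"),    // assumption: an archived CSV object
+//        RestoreRequest: &s3.RestoreRequest{
+//            Type: aws.String(s3.RestoreRequestTypeSelect),
+//            SelectParameters: &s3.SelectParameters{
+//                Expression:          aws.String("SELECT * FROM Object"),
+//                ExpressionType:      aws.String(s3.ExpressionTypeSql),
+//                InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
+//                OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//            },
+//            OutputLocation: &s3.OutputLocation{
+//                S3: &s3.S3Location{
+//                    BucketName: aws.String("example-bucket"),  // assumption: same-Region bucket
+//                    Prefix:     aws.String("select-results/"), // assumption
+//                },
+//            },
+//        },
+//    })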
+// +// Restoring objects +// +// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage +// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers are not accessible in real time. For objects in Archive Access +// or Deep Archive Access tiers you must first initiate a restore request, and +// then wait until the object is moved into the Frequent Access tier. For objects +// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate +// a restore request, and then wait until a temporary copy of the object is +// available. To access an archived object, you must restore the object for +// the duration (number of days) that you specify. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive +// tier when occasional urgent requests for a subset of archives are required. +// For all but the largest archived objects (250 MB+), data accessed using +// Expedited retrievals is typically made available within 1–5 minutes. +// Provisioned capacity ensures that retrieval capacity for Expedited retrievals +// is available when you need it. Expedited retrievals and provisioned capacity +// are not available for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. +// +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval +// requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier +// storage class or S3 Intelligent-Tiering Archive tier. They typically finish +// within 12 hours for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals +// are free for objects stored in S3 Intelligent-Tiering. +// +// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, +// enabling you to retrieve large amounts, even petabytes, of data inexpensively. +// Bulk retrievals typically finish within 5–12 hours for objects stored +// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. +// They typically finish within 48 hours for objects stored in the S3 Glacier +// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. For more information, see Upgrading +// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon Simple Storage Service Developer Guide. 
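+//
+// A minimal sketch of an archive restore using the Standard tier (the bucket,
+// key, and 10-day copy duration are illustrative assumptions):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("example-bucket"), // assumption
+//        Key:    aws.String("example-object"), // assumption: an archived object
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(10), // how long to keep the temporary copy
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String(s3.TierStandard), // or s3.TierExpedited / s3.TierBulk
+//            },
+//        },
+//    })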
+//
+// To get the status of object restoration, you can send a HEAD request. Operations
+// return the x-amz-restore header, which provides information about the restoration
+// status, in the response. You can use Amazon S3 event notifications to notify
+// you when a restore is initiated or completed. For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period while
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Responses
+//
+// A successful operation returns either the 200 OK or 202 Accepted status code.
+//
+// * If the object was not previously restored, Amazon S3 returns 202
+// Accepted in the response.
+//
+// * If the object was previously restored, Amazon S3 returns 200 OK in the
+// response.
+//
+// Special Errors
+//
+// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress.
+// (This error does not apply to SELECT type requests.) HTTP Status Code:
+// 409 Conflict SOAP Fault Code Prefix: Client
+//
+// * Code: GlacierExpeditedRetrievalNotAvailable Cause: Expedited retrievals
+// are currently not available. Try again later. (Returned if there is insufficient
+// capacity to process the Expedited request. This error applies only to
+// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP
+// Status Code: 503 SOAP Fault Code Prefix: N/A
+//
+// Related Resources
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+// This operation is not allowed against this storage tier.
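The x-amz-restore header described above is exposed by the SDK as the Restore field of HeadObjectOutput, so a status poll is a plain HeadObject call. A sketch with placeholder names:

```go
// While the restore is in flight the header reads ongoing-request="true";
// once the temporary copy is available it reads ongoing-request="false"
// together with an expiry-date.
head, err := svc.HeadObject(&s3.HeadObjectInput{
	Bucket: aws.String("example-bucket"),     // hypothetical bucket
	Key:    aws.String("archive/report.bin"), // hypothetical key
})
if err != nil {
	fmt.Println("HeadObject failed:", err)
} else if head.Restore != nil {
	fmt.Println("restore status:", *head.Restore)
}
```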
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+    req, out := c.RestoreObjectRequest(input)
+    return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+    req, out := c.RestoreObjectRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opSelectObjectContent = "SelectObjectContent"
+
+// SelectObjectContentRequest generates a "aws/request.Request" representing the
+// client's request for the SelectObjectContent operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See SelectObjectContent for more information on using the SelectObjectContent
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the SelectObjectContentRequest method.
+// req, resp := client.SelectObjectContentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) {
+    op := &request.Operation{
+        Name:       opSelectObjectContent,
+        HTTPMethod: "POST",
+        HTTPPath:   "/{Bucket}/{Key+}?select&select-type=2",
+    }
+
+    if input == nil {
+        input = &SelectObjectContentInput{}
+    }
+
+    output = &SelectObjectContentOutput{}
+    req = c.newRequest(op, input, output)
+
+    es := NewSelectObjectContentEventStream()
+    req.Handlers.Unmarshal.PushBack(es.setStreamCloser)
+    output.EventStream = es
+
+    req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
+    req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
+    req.Handlers.Unmarshal.PushBack(es.runOutputStream)
+    req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose)
+    return
+}
+
+// SelectObjectContent API operation for Amazon Simple Storage Service.
+//
+// This operation filters the contents of an Amazon S3 object based on a simple
+// structured query language (SQL) statement. In the request, along with the
+// SQL expression, you must also specify a data serialization format (JSON,
+// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse
+// object data into records, and returns only records that match the specified
+// SQL expression. You must also specify the data serialization format for the
+// response.
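Because the results of this operation arrive as an event stream rather than a single response body, a caller drains the stream's Events channel and type-switches on each event. A minimal sketch, again with placeholder names and assuming a CSV object with a header row:

```go
out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
	Bucket:         aws.String("example-bucket"), // hypothetical bucket
	Key:            aws.String("data/rows.csv"),  // hypothetical key
	Expression:     aws.String("SELECT s.Id FROM S3Object s"),
	ExpressionType: aws.String(s3.ExpressionTypeSql),
	InputSerialization: &s3.InputSerialization{
		CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
	},
	OutputSerialization: &s3.OutputSerialization{
		CSV: &s3.CSVOutput{},
	},
})
if err != nil {
	fmt.Println("SelectObjectContent failed:", err)
	return
}
defer out.EventStream.Close()

// Records events carry the query results; other event types report
// progress, stats, and end-of-stream.
for ev := range out.EventStream.Events() {
	if records, ok := ev.(*s3.RecordsEvent); ok {
		fmt.Print(string(records.Payload))
	}
}
if err := out.EventStream.Err(); err != nil {
	fmt.Println("event stream error:", err)
}
```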
+//
+// This action is not supported by Amazon S3 on Outposts.
+//
+// For more information about Amazon S3 Select, see Selecting Content from Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// For more information about using SQL with Amazon S3 Select, see SQL Reference
+// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Permissions
+//
+// You must have s3:GetObject permission for this operation. Amazon S3 Select
+// does not support anonymous access. For more information about permissions,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Object Data Formats
+//
+// You can use Amazon S3 Select to query objects that have the following format
+// properties:
+//
+// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
+//
+// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
+//
+// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2.
+// GZIP and BZIP2 are the only compression formats that Amazon S3 Select
+// supports for CSV and JSON files. Amazon S3 Select supports columnar compression
+// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object
+// compression for Parquet objects.
+//
+// * Server-side encryption - Amazon S3 Select supports querying objects
+// that are protected with server-side encryption. For objects that are encrypted
+// with customer-provided encryption keys (SSE-C), you must use HTTPS, and
+// you must use the headers that are documented in GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon Simple Storage Service Developer Guide. For objects that
+// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer
+// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side
+// encryption is handled transparently, so you don't need to specify anything.
+// For more information about server-side encryption, including SSE-S3 and
+// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Working with the Response Body
+//
+// Because the response size is unknown, Amazon S3 Select streams the response
+// as a series of messages and includes a Transfer-Encoding header with chunked
+// as its value in the response. For more information, see Appendix: SelectObjectContent
+// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html).
+//
+// GetObject Support
+//
+// The SelectObjectContent operation does not support the following GetObject
+// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
+//
+// * Range: Although you can specify a scan range for an Amazon S3 Select
+// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange)
+// in the request parameters), you cannot specify the range of bytes of an
+// object to return.
+//
+// * GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot
+// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes.
+// For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Special Errors
+//
+// For a list of special errors for this operation, see List of SELECT Object
+// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList).
+//
+// Related Resources
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation SelectObjectContent for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
+    req, out := c.SelectObjectContentRequest(input)
+    return out, req.Send()
+}
+
+// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SelectObjectContent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
+    req, out := c.SelectObjectContentRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+var _ awserr.Error
+
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
+//
+// For testing and mocking the event stream this type should be initialized via
+// the NewSelectObjectContentEventStream constructor function. Use the functional
+// options to pass in nested mock behavior.
+type SelectObjectContentEventStream struct {
+
+    // Reader is the EventStream reader for the SelectObjectContentEventStream
+    // events. This value is automatically set by the SDK when the API call is made.
+    // Use this member when unit testing your code with the SDK to mock out the
+    // EventStream Reader.
+    //
+    // Must not be nil.
+    Reader SelectObjectContentEventStreamReader
+
+    outputReader io.ReadCloser
+
+    // StreamCloser is the io.Closer for the EventStream connection. For HTTP
+    // EventStream this is the response Body. The stream will be closed when
+    // the Close method of the EventStream is called.
+    StreamCloser io.Closer
+
+    done      chan struct{}
+    closeOnce sync.Once
+    err       *eventstreamapi.OnceError
+}
+
+// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream.
+// This function should only be used for testing and mocking the SelectObjectContentEventStream
+// stream within your application.
+//
+// The Reader member must be set before reading events from the stream.
+//
+// The StreamCloser member should be set to the underlying io.Closer,
+// (e.g. http.Response.Body), that will be closed when the stream Close method
+// is called.
+//
+//     es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream) {
+//         o.Reader = myMockStreamReader
+//         o.StreamCloser = myMockStreamCloser
+//     })
+func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
+    es := &SelectObjectContentEventStream{
+        done: make(chan struct{}),
+        err:  eventstreamapi.NewOnceError(),
+    }
+
+    for _, fn := range opts {
+        fn(es)
+    }
+
+    return es
+}
+
+func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) {
+    es.StreamCloser = r.HTTPResponse.Body
+}
+
+func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) {
+    if es.done == nil {
+        return
+    }
+    go es.waitStreamPartClose()
+}
+
+func (es *SelectObjectContentEventStream) waitStreamPartClose() {
+    var outputErrCh <-chan struct{}
+    if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok {
+        outputErrCh = v.ErrorSet()
+    }
+    var outputClosedCh <-chan struct{}
+    if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
+        outputClosedCh = v.Closed()
+    }
+
+    select {
+    case <-es.done:
+    case <-outputErrCh:
+        es.err.SetError(es.Reader.Err())
+        es.Close()
+    case <-outputClosedCh:
+        if err := es.Reader.Err(); err != nil {
+            es.err.SetError(es.Reader.Err())
+        }
+        es.Close()
+    }
+}
+
+// Events returns a channel to read events from.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+// * SelectObjectContentEventStreamUnknownEvent
+func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+    return es.Reader.Events()
+}
+
+func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) {
+    var opts []func(*eventstream.Decoder)
+    if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) {
+        opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger))
+    }
+
+    unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{
+        metadata: protocol.ResponseMetadata{
+            StatusCode: r.HTTPResponse.StatusCode,
+            RequestID:  r.RequestID,
+        },
+    }.UnmarshalerForEventName
+
+    decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...)
+    eventReader := eventstreamapi.NewEventReader(decoder,
+        protocol.HandlerPayloadUnmarshal{
+            Unmarshalers: r.Handlers.UnmarshalStream,
+        },
+        unmarshalerForEvent,
+    )
+
+    es.outputReader = r.HTTPResponse.Body
+    es.Reader = newReadSelectObjectContentEventStream(eventReader)
+}
+
+// Close closes the stream and the underlying HTTP connection.
+// Close must be called when done using the stream API.
+// Not calling Close may result in resource leaks.
+//
+// You can use the closing of the Reader's Events channel to terminate your
+// application's read from the API's stream.
+//
+func (es *SelectObjectContentEventStream) Close() (err error) {
+    es.closeOnce.Do(es.safeClose)
+    return es.Err()
+}
+
+func (es *SelectObjectContentEventStream) safeClose() {
+    if es.done != nil {
+        close(es.done)
+    }
+
+    es.Reader.Close()
+    if es.outputReader != nil {
+        es.outputReader.Close()
+    }
+
+    es.StreamCloser.Close()
+}
+
+// Err returns any error that occurred while reading or writing EventStream
+// Events from the service API's response. Returns nil if there were no errors.
+func (es *SelectObjectContentEventStream) Err() error {
+    if err := es.err.Err(); err != nil {
+        return err
+    }
+    if err := es.Reader.Err(); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+    op := &request.Operation{
+        Name:       opUploadPart,
+        HTTPMethod: "PUT",
+        HTTPPath:   "/{Bucket}/{Key+}",
+    }
+
+    if input == nil {
+        input = &UploadPartInput{}
+    }
+
+    output = &UploadPartOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// In this operation, you provide part data in your request. However, you also
+// have the option of specifying an existing Amazon S3 object as the data source
+// for the part you are uploading. To upload a part from an existing object, you
+// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation.
+//
+// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier that you must include in your
+// upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object
+// being created. If you upload a new part using the same part number that was
+// used with a previous part, the previously uploaded part is overwritten. Each
+// part must be at least 5 MB in size, except the last part. There is no size
+// limit on the last part of your multipart upload.
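A minimal sketch of the flow these paragraphs describe: initiate the upload, send a part, then complete so that part storage stops accruing charges. It assumes it runs inside a function returning error, with partData ([]byte) already loaded and the bytes package imported; all names are placeholders:

```go
// Initiate the upload; S3 returns the upload ID every later call must carry.
create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
	Bucket: aws.String("example-bucket"), // hypothetical bucket
	Key:    aws.String("big/object.bin"), // hypothetical key
})
if err != nil {
	return err
}

// Upload part 1. Every part except the last must be at least 5 MB.
part, err := svc.UploadPart(&s3.UploadPartInput{
	Bucket:     create.Bucket,
	Key:        create.Key,
	UploadId:   create.UploadId,
	PartNumber: aws.Int64(1),
	Body:       bytes.NewReader(partData),
})
if err != nil {
	return err
}

// Complete (or abort) the upload so S3 frees the stored parts.
_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
	Bucket:   create.Bucket,
	Key:      create.Key,
	UploadId: create.UploadId,
	MultipartUpload: &s3.CompletedMultipartUpload{
		Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
	},
})
return err
```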
+//
+// To ensure that data is not corrupted when traversing the network, specify
+// the Content-MD5 header in the upload part request. Amazon S3 checks the part
+// data against the provided MD5 value. If they do not match, Amazon S3 returns
+// an error.
+//
+// If the upload request is signed with Signature Version 4, then AWS S3 uses
+// the x-amz-content-sha256 header as a checksum instead of Content-MD5. For
+// more information, see Authenticating Requests: Using the Authorization Header
+// (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Note: After you initiate a multipart upload and upload one or more parts, you
+// must either complete or abort the multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete or
+// abort the multipart upload does Amazon S3 free up the parts storage and stop
+// charging you for it.
+//
+// For more information on multipart uploads, go to Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
+// Amazon Simple Storage Service Developer Guide.
+//
+// For information on the permissions required to use the multipart upload API,
+// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// You can optionally request server-side encryption where Amazon S3 encrypts
+// your data as it writes it to disks in its data centers and decrypts it for
+// you when you access it. You have the option of providing your own encryption
+// key, or you can use the AWS managed encryption keys. If you choose to provide
+// your own encryption key, the request headers you provide in the request must
+// match the headers you used in the request to initiate the upload by using
+// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
+// you are using a customer-provided encryption key, you don't need to specify
+// the encryption parameters in each UploadPart request. Instead, you only need
+// to specify the server-side encryption parameters in the initial Initiate
+// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+//
+// If you requested server-side encryption using a customer-provided encryption
+// key in your initiate multipart upload request, you must provide identical
+// encryption information in each part upload using the following headers.
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// Special Errors
+//
+// * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+// The upload ID might be invalid, or the multipart upload might have been
+// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code
+// Prefix: Client
+//
+// Related Resources
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+    req, out := c.UploadPartRequest(input)
+    return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+    req, out := c.UploadPartRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+    op := &request.Operation{
+        Name:       opUploadPartCopy,
+        HTTPMethod: "PUT",
+        HTTPPath:   "/{Bucket}/{Key+}",
+    }
+
+    if input == nil {
+        input = &UploadPartCopyInput{}
+    }
+
+    output = &UploadPartCopyOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source. You
+// specify the data source by adding the request header x-amz-copy-source in
+// your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// The minimum allowable part size for a multipart upload is 5 MB. For more
+// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+// * For conceptual information about multipart uploads, see Uploading Objects
+// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// * For information about permissions required to use the multipart upload
+// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// * For information about copying objects using a single atomic operation
+// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// * For information about using server-side encryption with customer-provided
+// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
+// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request as follows: x-amz-copy-source-if-match
+// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since
+// condition evaluates to false; Amazon S3 returns 200 OK and copies the
+// data.
+//
+// * Consideration 2 - If both the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request
+// as follows: x-amz-copy-source-if-none-match condition evaluates to false,
+// and; x-amz-copy-source-if-modified-since condition evaluates to true;
+// Amazon S3 returns a 412 Precondition Failed response code.
+//
+// Versioning
+//
+// If your bucket has versioning enabled, you could have multiple versions of
+// the same object. By default, x-amz-copy-source identifies the current version
+// of the object to copy. If the current version is a delete marker and you
+// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404
+// error, because the object does not exist.
If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. +// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. 
+ DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. +func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. 
For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the transfer acceleration status of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. 
+func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. 
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + BucketAccountId *string `type:"string"` + + // Specifies the file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. 
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. This date can change when making changes to + // your bucket, such as editing its bucket policy. + CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Container for logging status information. +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. 
+ LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. 
+ //
+ // AllowedOrigins is a required field
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+ // The time in seconds that your browser is to cache the preflight response
+ // for the specified resource.
+ MaxAgeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CORSRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
+ if s.AllowedMethods == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
+ }
+ if s.AllowedOrigins == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllowedHeaders sets the AllowedHeaders field's value.
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
+ s.AllowedHeaders = v
+ return s
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
+ s.AllowedMethods = v
+ return s
+}
+
+// SetAllowedOrigins sets the AllowedOrigins field's value.
+func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
+ s.AllowedOrigins = v
+ return s
+}
+
+// SetExposeHeaders sets the ExposeHeaders field's value.
+func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
+ s.ExposeHeaders = v
+ return s
+}
+
+// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
+func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
+ s.MaxAgeSeconds = &v
+ return s
+}
+
+// Describes how an uncompressed comma-separated values (CSV)-formatted input
+// object is formatted.
+type CSVInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies that CSV field values may contain quoted record delimiters and
+ // such records should be allowed. Default value is FALSE. Setting this value
+ // to TRUE may lower performance.
+ AllowQuotedRecordDelimiter *bool `type:"boolean"`
+
+ // A single character used to indicate that a row should be ignored when the
+ // character is present at the start of that row. You can specify any character
+ // to indicate a comment line.
+ Comments *string `type:"string"`
+
+ // A single character used to separate individual fields in a record. You can
+ // specify an arbitrary delimiter.
+ FieldDelimiter *string `type:"string"`
+
+ // Describes the first line of input. Valid values are:
+ //
+ // * NONE: First line is not a header.
+ //
+ // * IGNORE: First line is a header, but you can't use the header values
+ // to indicate the column in an expression. You can use column position (such
+ // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).
+ //
+ // * USE: First line is a header, and you can use the header value to identify
+ // a column in an expression (SELECT "name" FROM OBJECT).
+ FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
+
+ // A single character used for escaping when the field delimiter is part of
+ // the value.
For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV + QuoteCharacter *string `type:"string"` + + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". + QuoteEscapeCharacter *string `type:"string"` + + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. 
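+
+// exampleSelectCSVInput is an illustrative sketch added during review, not
+// generated code: it builds the CSVInput described above for an S3 Select
+// request over a comma-separated file whose first line is a usable header
+// (FileHeaderInfoUse is the "USE" enum value).
+func exampleSelectCSVInput() *CSVInput {
+ return (&CSVInput{}).
+ SetFileHeaderInfo(FileHeaderInfoUse).
+ SetFieldDelimiter(",").
+ SetRecordDelimiter("\n")
+}
+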
+func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +// Container for specifying the AWS Lambda notification configuration. +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // Bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The role supporting the invocation of the Lambda function + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. +type CommonPrefix struct { + _ struct{} `type:"structure"` + + // Container for the specified common prefix. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The container for the multipart upload request information. + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
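+
+// exampleCompleteMultipartUpload is an illustrative sketch added during
+// review, not generated code: it wires the input type above into the client's
+// CompleteMultipartUpload call. svc is assumed to be an existing *S3 client;
+// the bucket and key names are hypothetical placeholders.
+func exampleCompleteMultipartUpload(svc *S3, uploadID string, parts []*CompletedPart) (*CompleteMultipartUploadOutput, error) {
+ return svc.CompleteMultipartUpload(&CompleteMultipartUploadInput{
+ Bucket: aws.String("example-bucket"),
+ Key: aws.String("example-key"),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &CompletedMultipartUpload{Parts: parts},
+ })
+}
+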
+func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket that contains the newly created object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + Bucket *string `type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. 
The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The object key of the newly created object. + Key *string `min:"1" type:"string"` + + // The URI that identifies the newly created object. + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created object, in case the bucket has versioning + // turned on. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
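+
+// exampleCompletedParts is an illustrative sketch added during review, not
+// generated code: it assembles the CompletedMultipartUpload value (types
+// defined just below) from per-part ETags recorded while uploading. Part
+// numbers are 1-based, and the service expects them in ascending order, so
+// the ETags are assumed to be supplied in upload order.
+func exampleCompletedParts(orderedETags []string) *CompletedMultipartUpload {
+ u := &CompletedMultipartUpload{}
+ for i, etag := range orderedETags {
+ u.Parts = append(u.Parts, (&CompletedPart{}).
+ SetPartNumber(int64(i+1)).
+ SetETag(etag))
+ }
+ return u
+}
+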
+func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + // Array of CompletedPart data types. + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CompletedPart) SetETag(v string) *CompletedPart { + s.ETag = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { + s.PartNumber = &v + return s +} + +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. 
If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
+
+type ContinuationEvent struct {
+ _ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuationEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuationEvent) GoString() string {
+ return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ return msg, err
+}
+
+type CopyObjectInput struct {
+ _ struct{} `locationName:"CopyObjectRequest" type:"structure"`
+
+ // The canned ACL to apply to the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // The name of the destination bucket.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // When using this API with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this operation using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using AWS KMS (SSE-KMS). Setting this header
+ // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
+ // SSE-KMS.
+ //
+ // Specifying this header with a COPY operation doesn’t affect bucket-level
+ // settings for S3 Bucket Key.
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies the source object for the copy operation. You specify the value
+ // in one of two formats, depending on whether you want to access the source
+ // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html):
+ //
+ // * For objects not accessed through an access point, specify the name of
+ // the source bucket and the key of the source object, separated by a slash
+ // (/). For example, to copy the object reports/january.pdf from the bucket
+ // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value
+ // must be URL encoded.
+ //
+ // * For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the
+ // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+ // For example, to copy the object reports/january.pdf through access point
+ // my-access-point owned by account 123456789012 in Region us-west-2, use
+ // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL encoded. Amazon S3 supports copy operations using
+ // access points only when the source and destination buckets are in the
+ // same AWS Region. Alternatively, for objects accessed through Amazon S3
+ // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object reports/january.pdf through outpost my-outpost
+ // owned by account 123456789012 in Region us-west-2, use the URL encoding
+ // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL encoded.
+ //
+ // To copy a specific version of an object, append ?versionId=<version-id> to
+ // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of
+ // the source object.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The account id of the expected destination bucket owner. If the destination
+ // bucket is owned by a different account, the request will fail with an HTTP
+ // 403 (Access Denied) error.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The account id of the expected source bucket owner. If the source bucket
+ // is owned by a different account, the request will fail with an HTTP 403 (Access
+ // Denied) error.
+ ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // The key of the destination object.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Specifies whether you want to apply a Legal Hold to the copied object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode that you want to apply to the copied object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want the copied object's Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. For information about configuring using any of the officially
+ // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request
+ // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object; this value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as URL
+ // Query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
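+
+// exampleCopyObject is an illustrative sketch added during review, not
+// generated code: a minimal server-side copy using CopyObjectInput. The
+// bucket and key names are hypothetical; per the CopySource doc above, the
+// value names the source as "bucket/key" and must be URL encoded.
+func exampleCopyObject(svc *S3) (*CopyObjectOutput, error) {
+ return svc.CopyObject(&CopyObjectInput{
+ Bucket: aws.String("destination-bucket"),
+ Key: aws.String("reports/january.pdf"),
+ CopySource: aws.String("awsexamplebucket/reports/january.pdf"),
+ })
+}
+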
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. 
+func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
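+
+// exampleReplaceTagsOnCopy is an illustrative sketch added during review, not
+// generated code: per the Tagging/TaggingDirective docs above, a new tag-set
+// supplied on copy is encoded as URL query parameters and takes effect
+// together with the REPLACE directive. The tag values are hypothetical.
+func exampleReplaceTagsOnCopy(in *CopyObjectInput) *CopyObjectInput {
+ return in.
+ SetTagging("project=alpha&owner=data-team").
+ SetTaggingDirective(TaggingDirectiveReplace)
+}
+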
+func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyObjectResult *CopyObjectResult `type:"structure"` + + // Version of the copied object in the destination bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. 
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Container for all response elements. +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied non-multipart object. + ETag *string `type:"string"` + + // Creation date of the object. 
+ LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Container for all response elements. +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // The name of the bucket to create. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. 
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need + // to specify the location. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. 
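+//
+// Editor's note, illustrative only (not generated SDK code): a minimal sketch
+// of creating a bucket with the types above, assuming the standard session and
+// s3 packages; outside us-east-1 the LocationConstraint must be set explicitly.
+// Bucket name and Region are placeholders:
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	out, err := svc.CreateBucket(&s3.CreateBucketInput{
+//		Bucket: aws.String("example-bucket"),
+//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//			LocationConstraint: aws.String("eu-west-1"),
+//		},
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.Location))
+//	}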
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+	s.Location = &v
+	return s
+}
+
+type CreateMultipartUploadInput struct {
+	_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
+
+	// The canned ACL to apply to the object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	// The name of the bucket to which to initiate the upload.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+	// with server-side encryption using AWS KMS (SSE-KMS). Setting this header
+	// to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
+	// SSE-KMS.
+	//
+	// Specifying this header with an object operation doesn’t affect bucket-level
+	// settings for S3 Bucket Key.
+	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Object key for which the multipart upload is to be initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Specifies whether you want to apply a Legal Hold to the uploaded object.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// Specifies the Object Lock mode that you want to apply to the uploaded object.
+	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// Specifies the date and time when you want the Object Lock to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS Encryption Context to use for object encryption. The
+	// value of this header is a base64-encoded UTF-8 string holding JSON with the
+	// encryption context key-value pairs.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+	// Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
+	// object encryption. All GET and PUT requests for an object protected by AWS
+	// KMS will fail if not made via SSL or using SigV4. For information about configuring
+	// using any of the officially supported AWS SDKs and AWS CLI, see Specifying
+	// the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+	// in the Amazon S3 Developer Guide.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+	// objects. The STANDARD storage class provides high durability and high availability.
+	// Depending on performance needs, you can specify a different Storage Class.
+	// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+	// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+	// in the Amazon S3 Service Developer Guide.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetACL sets the ACL field's value.
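+//
+// Editor's note, illustrative only (not generated SDK code): every setter on
+// CreateMultipartUploadInput returns the receiver, so required and optional
+// fields can be chained. A hedged sketch, assuming a configured *s3.S3 client
+// named svc (bucket and key are placeholders):
+//
+//	resp, err := svc.CreateMultipartUpload((&s3.CreateMultipartUploadInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example/key").
+//		SetACL(s3.ObjectCannedACLPrivate))
+//	if err == nil {
+//		fmt.Println(aws.StringValue(resp.UploadId))
+//	}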
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. 
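+//
+// Editor's note, illustrative only: ObjectLockMode is normally paired with
+// ObjectLockRetainUntilDate; a hedged sketch of applying a 30-day GOVERNANCE
+// retention to a new upload (placeholder bucket and key):
+//
+//	input := (&s3.CreateMultipartUploadInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example/key").
+//		SetObjectLockMode(s3.ObjectLockModeGovernance).
+//		SetObjectLockRetainUntilDate(time.Now().AddDate(0, 0, 30))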
+func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
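+//
+// Editor's note, illustrative only: together with hasEndpointARN and
+// getEndpointARN above, this is what lets callers pass an access point ARN
+// wherever a bucket name is expected. A hedged sketch with a placeholder ARN:
+//
+//	input := (&s3.CreateMultipartUploadInput{}).
+//		SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap").
+//		SetKey("example/key")
+//	// The endpoint middleware detects the ARN, resolves the access point
+//	// hostname, and backfills the plain resource name via updateArnableField.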
+func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type CreateMultipartUploadOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the bucket has a lifecycle rule configured with an action to abort incomplete
+	// multipart uploads and the prefix in the lifecycle rule matches the object
+	// name in the request, the response includes this header. The header indicates
+	// when the initiated multipart upload becomes eligible for an abort operation.
+	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+	//
+	// The response also includes the x-amz-abort-rule-id header that provides the
+	// ID of the lifecycle configuration rule that defines this action.
+	AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+	// This header is returned along with the x-amz-abort-date header. It identifies
+	// the applicable lifecycle configuration rule that defines the action to abort
+	// incomplete multipart uploads.
+	AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+	// The name of the bucket to which the multipart upload was initiated.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	Bucket *string `locationName:"Bucket" type:"string"`
+
+	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+	// encryption with AWS KMS (SSE-KMS).
+	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `min:"1" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. 
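+//
+// Editor's note, illustrative only: a hedged sketch of reading the SSE-KMS
+// response fields once an upload is initiated with KMS encryption (placeholder
+// bucket and key; svc is a configured *s3.S3 client):
+//
+//	out, err := svc.CreateMultipartUpload((&s3.CreateMultipartUploadInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example/key").
+//		SetServerSideEncryption(s3.ServerSideEncryptionAwsKms))
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.SSEKMSKeyId), aws.StringValue(out.UploadId))
+//	}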
+func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + Years *int64 `type:"integer"` +} + +// String returns the string representation +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +// Container for the objects to delete. +type Delete struct { + _ struct{} `type:"structure"` + + // The objects to delete. + // + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. 
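+//
+// Editor's note, illustrative only: a hedged sketch of a quiet multi-object
+// delete built from this container (DeleteObjects and ObjectIdentifier are
+// defined elsewhere in this file; bucket and keys are placeholders):
+//
+//	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("example/key-1")},
+//				{Key: aws.String("example/key-2")},
+//			},
+//			Quiet: aws.Bool(true), // suppress per-key results in the response
+//		},
+//	})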
+func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
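+//
+// Editor's note, illustrative only: the value receiver below is what keeps the
+// caller's input intact; a hedged sketch of the copy semantics (the method is
+// unexported, so this only applies within this package; the ARN is elided):
+//
+//	in := &DeleteBucketAnalyticsConfigurationInput{Bucket: aws.String("arn:aws:s3:...")}
+//	out, _ := in.updateArnableField("backfilled-bucket")
+//	// in.Bucket still holds the ARN; out points at a modified copy.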
+func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Specifies the bucket whose cors configuration is being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Specifies the bucket being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // The bucket name of the lifecycle to delete. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketOwnershipControlsInput struct { + _ struct{} `locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` + + // The Amazon S3 bucket whose OwnershipControls you want to delete. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
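+	//
+	// A minimal sketch (bucket name and account ID are hypothetical); the value
+	// is sent as the x-amz-expected-bucket-owner header shown in the field tag:
+	//
+	//    svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
+	//        Bucket:              aws.String("examplebucket"),
+	//        ExpectedBucketOwner: aws.String("111122223333"),
+	//    })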
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // The bucket that has the tag set to be removed. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // The bucket name for which you want to remove the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
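+// Setters return the receiver, so calls can be chained when building an input
+// (values here are hypothetical):
+//
+//    input := (&s3.DeleteBucketWebsiteInput{}).
+//        SetBucket("examplebucket").
+//        SetExpectedBucketOwner("111122223333")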
+func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type DeleteBucketWebsiteOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of an object.
+	IsLatest *bool `type:"boolean"`
+
+	// The object key.
+	Key *string `min:"1" type:"string"`
+
+	// Date and time the object was last modified.
+	LastModified *time.Time `type:"timestamp"`
+
+	// The account that created the delete marker.
+	Owner *Owner `type:"structure"`
+
+	// Version ID of an object.
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+	return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+	s.IsLatest = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+	s.Key = &v
+	return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+	s.LastModified = &v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+	s.Owner = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+	s.VersionId = &v
+	return s
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a DeleteMarkerReplication
+// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+// Status must be set to Disabled, because Amazon S3 does not support replicating
+// delete markers for tag-based rules. For an example configuration, see Basic
+// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// For more information about delete marker replication, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+//
+// If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to replicate delete markers.
+	Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerReplication) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerReplication) GoString() string {
+	return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+	s.Status = &v
+	return s
+}
+
+type DeleteObjectInput struct {
+	_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+
+	// The name of the bucket containing the object.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates whether S3 Object Lock should bypass Governance-mode restrictions
+	// to process this operation.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Key name of the object to delete.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device. Required to
+	// permanently delete a versioned object if versioning is configured with MFA
+	// delete enabled.
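+	//
+	// For example (serial number and code are hypothetical), a device with serial
+	// 20899872 currently displaying 301749 would be passed as:
+	//
+	//    input.SetMFA("20899872 301749")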
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *DeleteObjectInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value.
+func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput {
+	s.BypassGovernanceRetention = &v
+	return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
+	s.Key = &v
+	return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
+	s.MFA = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
+	s.RequestPayer = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
+	s.VersionId = &v
+	return s
+}
+
+func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteObjectInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
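+// The value receiver is what provides that guarantee: the assignment to s.Bucket
+// in the body mutates the local copy, and &s hands that copy back to the caller.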
+func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type DeleteObjectOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the versioned object that was permanently deleted was (true)
+	// or was not (false) a delete marker.
+	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// Returns the version ID of the delete marker created as a result of the DELETE
+	// operation.
+	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectOutput) GoString() string {
+	return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
+	s.DeleteMarker = &v
+	return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
+	s.RequestCharged = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
+	s.VersionId = &v
+	return s
+}
+
+type DeleteObjectTaggingInput struct {
+	_ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`
+
+	// The name of the bucket containing the objects from which to remove the tags.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The key that identifies the object in the bucket from which to remove all
+	// tags.
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. 
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
+	s.VersionId = &v
+	return s
+}
+
+type DeleteObjectsInput struct {
+	_ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`
+
+	// The name of the bucket containing the objects to delete.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether you want to delete this object even if it has a Governance-type
+	// Object Lock in place. You must have sufficient permissions to perform this
+	// operation.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// Container for the request.
+	//
+	// Delete is a required field
+	Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device. Required to
+	// permanently delete a versioned object if versioning is configured with MFA
+	// delete enabled.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
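+// A sketch of a batch-delete input that passes this validation (bucket and keys
+// are hypothetical); the SDK also runs Validate automatically before sending
+// the request:
+//
+//    input := &s3.DeleteObjectsInput{
+//        Bucket: aws.String("examplebucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{
+//                {Key: aws.String("a.txt")},
+//                {Key: aws.String("b.txt")},
+//            },
+//            Quiet: aws.Bool(true),
+//        },
+//    }
+//    err := input.Validate()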
+func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + // Container element for a successful delete. It identifies the object that + // was successfully deleted. + Deleted []*DeletedObject `type:"list" flattened:"true"` + + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered. + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
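+	// When returned, the only value currently defined for this header is "requester"
+	// (the RequestChargedRequester constant).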
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type DeletePublicAccessBlockOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeletePublicAccessBlockOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePublicAccessBlockOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the deleted object.
+type DeletedObject struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the versioned object that was permanently deleted was (true)
+	// or was not (false) a delete marker. In a simple DELETE, this header indicates
+	// whether (true) or not (false) a delete marker was created.
+	DeleteMarker *bool `type:"boolean"`
+
+	// The version ID of the delete marker created as a result of the DELETE operation.
+	// If you delete a specific object version, the value returned by this header
+	// is the version ID of the object version deleted.
+	DeleteMarkerVersionId *string `type:"string"`
+
+	// The name of the deleted object.
+	Key *string `min:"1" type:"string"`
+
+	// The version ID of the deleted object.
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+	return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+	s.DeleteMarker = &v
+	return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+	s.DeleteMarkerVersionId = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+	s.Key = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+	s.VersionId = &v
+	return s
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+	_ struct{} `type:"structure"`
+
+	// Specify this only in a cross-account scenario (where source and destination
+	// bucket owners are not the same), and you want to change replica ownership
+	// to the AWS account that owns the destination bucket. If this is not specified
+	// in the replication configuration, the replicas are owned by the same AWS
+	// account that owns the source object.
+	AccessControlTranslation *AccessControlTranslation `type:"structure"`
+
+	// Destination bucket owner account ID. In a cross-account scenario, if you
+	// direct Amazon S3 to change replica ownership to the AWS account that owns
+	// the destination bucket by specifying the AccessControlTranslation property,
+	// this is the account ID of the destination bucket owner. For more information,
+	// see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
+	// in the Amazon Simple Storage Service Developer Guide.
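+	//
+	// A cross-account sketch (bucket ARN and account ID are hypothetical), pairing
+	// Account with AccessControlTranslation as described above:
+	//
+	//    dest := &s3.Destination{
+	//        Bucket:  aws.String("arn:aws:s3:::destinationbucket"),
+	//        Account: aws.String("111122223333"),
+	//        AccessControlTranslation: &s3.AccessControlTranslation{
+	//            Owner: aws.String(s3.OwnerOverrideDestination),
+	//        },
+	//    }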
+ Account *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store the results. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // A container specifying replication metrics-related settings enabling replication + // metrics and events. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. 
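+// ReplicationTime must be specified together with a Metrics block; a sketch
+// using the SDK's enum constants (15 minutes is the S3 RTC threshold):
+//
+//    dest.SetReplicationTime(&s3.ReplicationTime{
+//        Status: aws.String(s3.ReplicationTimeStatusEnabled),
+//        Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
+//    }).SetMetrics(&s3.Metrics{
+//        Status:         aws.String(s3.MetricsStatusEnabled),
+//        EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
+//    })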
+func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Contains the type of server-side encryption used. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (for example, AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed AWS KMS CMK to use for encryption of job results. + // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric + // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. + KMSKeyId *string `type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v + return s +} + +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v + return s +} + +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v + return s +} + +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer + // master key (CMK) stored in AWS Key Management Service (KMS) for the destination + // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only + // supports symmetric customer managed CMKs. For more information, see Using + // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. + ReplicaKmsKeyID *string `type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. 
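+// For example (the key ARN is hypothetical), pointing replicas at the
+// destination account's CMK:
+//
+//    enc := (&s3.EncryptionConfiguration{}).
+//        SetReplicaKmsKeyID("arn:aws:kms:us-west-2:111122223333:key/example-key-id")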
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+	s.ReplicaKmsKeyID = &v
+	return s
+}
+
+// A message that indicates the request is complete and no more messages will
+// be sent. You should not assume that the request is complete until the client
+// receives an EndEvent.
+type EndEvent struct {
+	_ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s EndEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EndEvent) GoString() string {
+	return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	return msg, err
+}
+
+// Container for all error elements.
+type Error struct {
+	_ struct{} `type:"structure"`
+
+	// The error code is a string that uniquely identifies an error condition. It
+	// is meant to be read and understood by programs that detect and handle errors
+	// by type.
+	//
+	// Amazon S3 error codes
+	//
+	//    * Code: AccessDenied Description: Access Denied HTTP Status Code: 403
+	//    Forbidden SOAP Fault Code Prefix: Client
+	//
+	//    * Code: AccountProblem Description: There is a problem with your AWS account
+	//    that prevents the operation from completing successfully. Contact AWS
+	//    Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault
+	//    Code Prefix: Client
+	//
+	//    * Code: AllAccessDisabled Description: All access to this Amazon S3 resource
+	//    has been disabled. Contact AWS Support for further assistance. HTTP Status
+	//    Code: 403 Forbidden SOAP Fault Code Prefix: Client
+	//
+	//    * Code: AmbiguousGrantByEmailAddress Description: The email address you
+	//    provided is associated with more than one account. HTTP Status Code: 400
+	//    Bad Request SOAP Fault Code Prefix: Client
+	//
+	//    * Code: AuthorizationHeaderMalformed Description: The authorization header
+	//    you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status
+	//    Code: N/A
+	//
+	//    * Code: BadDigest Description: The Content-MD5 you specified did not match
+	//    what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+	//    Client
+	//
+	//    * Code: BucketAlreadyExists Description: The requested bucket name is
+	//    not available. The bucket namespace is shared by all users of the system.
+	//    Please select a different name and try again. HTTP Status Code: 409 Conflict
+	//    SOAP Fault Code Prefix: Client
+	//
+	//    * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create
+	//    already exists, and you own it. Amazon S3 returns this error in all AWS
+	//    Regions except in the North Virginia Region.
For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. 
HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. 
HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. 
HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. 
HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + Code *string `type:"string"` + + // The error key. + Key *string `min:"1" type:"string"` + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. + Message *string `type:"string"` + + // The version ID of the error. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// The error information. 
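+//
+// As a hedged sketch (the key name is hypothetical), a static-website
+// configuration can route 4XX responses to a custom error object:
+//
+//	doc := &ErrorDocument{Key: aws.String("error.html")}
+//	if err := doc.Validate(); err != nil {
+//		// Key is required and must be at least one character long.
+//	}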
+type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 Developer Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + // The value that the filter searches for in object key names. 
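+	// For example, a rule with Name "prefix" and Value "images/" matches object
+	// keys beginning with images/, while Name "suffix" with Value ".jpg" matches
+	// keys ending in .jpg.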
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. 
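+	// Expected values are "Enabled" and "Suspended"; the field is nil when an
+	// accelerate configuration has never been set on the bucket.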
+ Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Specifies the S3 bucket whose ACL is being requested. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. 
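+	// A hedged sketch of reading the ACL (service client and bucket name are
+	// hypothetical):
+	//
+	//	out, err := svc.GetBucketAcl(&GetBucketAclInput{Bucket: aws.String("my-bucket")})
+	//	if err == nil {
+	//		fmt.Println(aws.StringValue(out.Owner.DisplayName), len(out.Grants))
+	//	}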
+ Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. 
+// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + + // The bucket name for which to get the cors configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
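+//
+// For example (the ARN is hypothetical), when Bucket holds an access point ARN
+// such as arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap, the SDK derives
+// the request endpoint from the ARN and uses this method to backfill the parsed
+// resource name into Bucket on a copy of the input.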
+func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
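+//
+// A minimal sketch of building and checking this input (bucket, owner account,
+// and configuration ID are hypothetical):
+//
+//	in := &GetBucketInventoryConfigurationInput{}
+//	in.SetBucket("my-bucket").SetExpectedBucketOwner("111122223333").SetId("report-1")
+//	err := in.Validate() // nil once Bucket and Id are populated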
+func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
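+//
+// Note that GetBucketLifecycleInput belongs to the older GetBucketLifecycle
+// API; new code will usually want GetBucketLifecycleConfigurationInput above,
+// whose output carries []*LifecycleRule values rather than the legacy []*Rule.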
+func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // The name of the bucket for which to get the location. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
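+//
+// A hedged sketch of resolving a bucket's Region with this input (service
+// client and bucket name are hypothetical):
+//
+//	out, err := svc.GetBucketLocation(&GetBucketLocationInput{Bucket: aws.String("my-bucket")})
+//	if err == nil {
+//		region := aws.StringValue(out.LocationConstraint)
+//		if region == "" {
+//			region = "us-east-1" // an empty constraint denotes us-east-1
+//		}
+//	}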
+func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // The bucket name for which to get the logging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
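+
+// Reviewer sketch (not generated code): for GetBucketLogging, a nil
+// LoggingEnabled in the output means server access logging is simply not
+// configured for the bucket, so guard the dereference:
+//
+//	out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if le := out.LoggingEnabled; le != nil {
+//		fmt.Printf("logs to %s/%s\n",
+//			aws.StringValue(le.TargetBucket), aws.StringValue(le.TargetPrefix))
+//	}
+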
+func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the notification configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsInput struct { + _ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
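+
+// Reviewer sketch (not generated code): GetBucketMetricsConfiguration,
+// defined just above, is one of the getters that needs two parameters --
+// the bucket and the configuration ID (sent as the "id" querystring). Its
+// Validate method rejects a missing Id client-side before any request is
+// made:
+//
+//	out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("EntireBucket"), // placeholder configuration ID
+//	})
+//	if err != nil {
+//		log.Fatal(err) // a nil Id surfaces here as an ErrInvalidParams
+//	}
+//	fmt.Println(aws.StringValue(out.MetricsConfiguration.Id))
+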
+func (s *GetBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure" payload:"OwnershipControls"` + + // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in + // effect for this Amazon S3 bucket. + OwnershipControls *OwnershipControls `type:"structure"` +} + +// String returns the string representation +func (s GetBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput { + s.OwnershipControls = v + return s +} + +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // The bucket name for which to get the bucket policy. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
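+
+// Reviewer sketch (not generated code): unlike most outputs in this file,
+// GetBucketPolicyOutput carries its payload as a raw JSON string rather than
+// a typed structure, so callers typically unmarshal Policy themselves:
+//
+//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	var doc map[string]interface{}
+//	if err := json.Unmarshal([]byte(aws.StringValue(out.Policy)), &doc); err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(doc["Version"])
+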
+func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + + // The bucket name for which to get the replication information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
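+
+// Reviewer sketch (not generated code): GetBucketPolicyStatus wraps a single
+// flag; aws.BoolValue gives a safe default of false when the pointer is nil:
+//
+//	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if out.PolicyStatus != nil && aws.BoolValue(out.PolicyStatus.IsPublic) {
+//		fmt.Println("bucket policy makes the bucket public")
+//	}
+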
+func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + + // The name of the bucket for which to get the payment request configuration + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
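+
+// Reviewer sketch (not generated code): the replication getter returns a
+// nested configuration (an IAM role plus up to 1,000 rules, per the comment
+// above). Note that a bucket with no replication configured yields an error
+// rather than an empty configuration:
+//
+//	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err) // e.g. ReplicationConfigurationNotFoundError
+//	}
+//	rc := out.ReplicationConfiguration
+//	fmt.Println("role:", aws.StringValue(rc.Role), "rules:", len(rc.Rules))
+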
+func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // The name of the bucket for which to get the tagging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // The name of the bucket for which to get the versioning information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
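+
+// Reviewer sketch (not generated code): TagSet is the one required output
+// field in this stretch; S3 returns a NoSuchTagSet error, rather than an
+// empty list, when the bucket has no tags:
+//
+//	out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err) // NoSuchTagSet when the bucket is untagged
+//	}
+//	for _, tag := range out.TagSet {
+//		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//	}
+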
+func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // The bucket name for which to get the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website (for example index.html). + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. 
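+
+// Reviewer sketch (not generated code): bucket versioning (above) has three
+// effective states -- never configured (Status is nil), Enabled, and
+// Suspended -- and, as the MFADelete comment notes, that element only
+// appears once MFA delete has ever been configured:
+//
+//	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	switch aws.StringValue(out.Status) {
+//	case s3.BucketVersioningStatusEnabled:
+//		fmt.Println("versioned")
+//	case s3.BucketVersioningStatusSuspended:
+//		fmt.Println("suspended")
+//	default:
+//		fmt.Println("never enabled")
+//	}
+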
+func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // The bucket name that contains the object for which to get the ACL information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key of the object for which to get the ACL information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
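+
+// Reviewer sketch (not generated code): a website configuration is either a
+// whole-site redirect (RedirectAllRequestsTo) or an index/error-document
+// setup, so check which of the optional sub-structures is present:
+//
+//	out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err) // NoSuchWebsiteConfiguration if none is set
+//	}
+//	if r := out.RedirectAllRequestsTo; r != nil {
+//		fmt.Println("redirects to", aws.StringValue(r.HostName))
+//	} else if idx := out.IndexDocument; idx != nil {
+//		fmt.Println("index document:", aws.StringValue(idx.Suffix))
+//	}
+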
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectInput struct { + _ struct{} `locationName:"GetObjectRequest" type:"structure"` + + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + + // Key of the object to get. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. 
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use to when decrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt + // the data. This value is used to decrypt the object when recovering it and + // must match the one used when storing the data. The key must be appropriate + // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
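+
+// Reviewer sketch (not generated code): GetObjectAcl, defined a little
+// earlier, is the first object-scoped getter in this stretch -- Key is
+// required alongside Bucket, and VersionId optionally pins a specific
+// object version:
+//
+//	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("path/to/object.txt"), // placeholder key
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, g := range out.Grants {
+//		fmt.Println(aws.StringValue(g.Permission), "->", aws.StringValue(g.Grantee.Type))
+//	}
+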
+func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
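+
+// Reviewer sketch (not generated code): GetObjectInput combines all of the
+// request knobs documented above -- conditional headers (If-Match and
+// friends), a byte Range, response-* header overrides, and the SSE-C
+// fields. The output (defined later in this file) streams the payload as an
+// io.ReadCloser that the caller must close:
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("path/to/object.txt"),
+//		Range:  aws.String("bytes=0-1023"), // first KiB only
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer out.Body.Close()
+//	n, _ := io.Copy(ioutil.Discard, out.Body)
+//	fmt.Println("read", n, "bytes")
+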
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` + + // The bucket name containing the object whose Legal Hold status you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose Legal Hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose Legal Hold status you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
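+
+// Reviewer note (illustrative, not generated code): the getEndpointARN /
+// hasEndpointARN / updateArnableField trio repeated on every input type in
+// this file is what lets callers pass an access point (or S3 on Outposts)
+// ARN in the Bucket field, as the field comments describe. When the Bucket
+// value parses as an ARN, the SDK routes the request to the corresponding
+// endpoint and uses updateArnableField to backfill the parsed resource name
+// into a copy of the input. From the caller's side, only the Bucket value
+// changes:
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		// hypothetical access point ARN in place of a bucket name
+//		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
+//		Key:    aws.String("path/to/object.txt"),
+//	})
+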
+func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current Legal Hold status for the specified object. + LegalHold *ObjectLockLegalHold `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose Object Lock configuration you want to retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. 
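+
+// Editor's aside, not generated code: a sketch of reading a bucket's Object
+// Lock configuration with this input/output pair, assuming svc is an *S3
+// client created elsewhere and the bucket name is hypothetical:
+//
+//	out, err := svc.GetObjectLockConfiguration(&GetObjectLockConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.ObjectLockConfiguration != nil {
+//		fmt.Println(aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled))
+//	}
+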
+func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. 
This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's Object Lock will expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. 
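+
+// Editor's aside, not generated code: GetObjectOutput.Body streams the object
+// data as an io.ReadCloser, so callers should drain and close it. A sketch,
+// names hypothetical and error handling abbreviated:
+//
+//	out, err := svc.GetObject(&GetObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer out.Body.Close()
+//	data, err := ioutil.ReadAll(out.Body) // or io.Copy into a file
+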
+func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
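+
+// Editor's aside, not generated code: a sketch of fetching retention settings
+// for a specific object version (bucket, key, and version ID are made up):
+//
+//	out, err := svc.GetObjectRetention(&GetObjectRetentionInput{
+//		Bucket:    aws.String("example-bucket"),
+//		Key:       aws.String("example-key"),
+//		VersionId: aws.String("3HL4kqCxf3vjVBH40Nrjfkd"),
+//	})
+//	if err == nil && out.Retention != nil {
+//		fmt.Println(aws.StringValue(out.Retention.Mode),
+//			aws.TimeValue(out.Retention.RetainUntilDate))
+//	}
+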
+func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` +} + +// String returns the string representation +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // The bucket name containing the object for which to get the tagging information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which to get the tagging information. 
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The versionId of the object for which to get the tagging information. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + // The versionId of the object for which you got the tagging information. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + + // The name of the bucket containing the object for which to get the torrent + // files. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key for which to get the information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
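+
+// Editor's aside, not generated code: a sketch of listing an object's tags via
+// GetObjectTagging and iterating the returned TagSet (names hypothetical):
+//
+//	out, err := svc.GetObjectTagging(&GetObjectTaggingInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err == nil {
+//		for _, tag := range out.TagSet {
+//			fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//		}
+//	}
+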
+func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Retrieval tier at which the restore will be processed. 
+ // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Container for grant information. +type Grant struct { + _ struct{} `type:"structure"` + + // The person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Container for the person being granted permissions. +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. 
+ URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // The bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
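+
+// Editor's aside, not generated code: HeadBucket returns no body, so it is
+// often used as an existence/permission probe. One conventional (not the only)
+// way to detect a missing bucket is via awserr.RequestFailure:
+//
+//	_, err := svc.HeadBucket(&HeadBucketInput{Bucket: aws.String("example-bucket")})
+//	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
+//		// bucket does not exist, or is not visible to this caller
+//	}
+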
+func (s *HeadBucketInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *HeadBucketInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *HeadBucketInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type HeadBucketOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+	return s.String()
+}
+
+type HeadObjectInput struct {
+	_ struct{} `locationName:"HeadObjectRequest" type:"structure"`
+
+	// The name of the bucket containing the object.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Return the object only if its entity tag (ETag) is the same as the one specified,
+	// otherwise return a 412 (precondition failed).
+	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+	// Return the object only if it has been modified since the specified time,
+	// otherwise return a 304 (not modified).
+	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+	// Return the object only if its entity tag (ETag) is different from the one
+	// specified, otherwise return a 304 (not modified).
+	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+	// Return the object only if it has not been modified since the specified time,
+	// otherwise return a 412 (precondition failed).
+	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+	// The object key.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of the object being read. This is a positive integer between
+	// 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+	// Useful for querying the size of the part and the number of parts in this
+	// object.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+	// Downloads the specified range of bytes of an object. For more information about
+	// the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
+	//
+	// Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
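+
+// Editor's aside, not generated code: HeadObject retrieves object metadata
+// without the body, e.g. to check size and content type before a download.
+// Sketch with hypothetical names:
+//
+//	out, err := svc.HeadObject(&HeadObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ContentType))
+//	}
+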
+func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // The archive state of the head object. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. 
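+ //
+ // For example (illustrative values only):
+ //
+ // x-amz-expiration: expiry-date="Fri, 23 Dec 2012 00:00:00 GMT", rule-id="picture-deletion-rule"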
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized format:
+ // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase.
+ // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // Specifies whether a legal hold is in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectLegalHold permission.
+ // This header is not returned if the specified version of this object has never
+ // had a legal hold applied. For more information about S3 Object Lock, see
+ // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode, if any, that's in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when the Object Lock retention period expires. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or a destination in a replication rule.
+ //
+ // In replication, you have a source bucket on which you configure replication
+ // and a destination bucket or buckets where Amazon S3 stores object replicas.
+ // When you request an object (GetObject) or object metadata (HeadObject) from
+ // these buckets, Amazon S3 will return the x-amz-replication-status header
+ // in the response as follows:
+ //
+ // * If requesting an object from the source bucket — Amazon S3 will return
+ // the x-amz-replication-status header if the object in your request is eligible
+ // for replication. For example, suppose that in your replication configuration,
+ // you specify object prefix TaxDocs, requesting Amazon S3 to replicate objects
+ // with key prefix TaxDocs.
Any objects you upload with this key name prefix,
+ // for example TaxDocs/document1.pdf, are eligible for replication. For any
+ // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
+ // header with value PENDING, COMPLETED, or FAILED indicating object replication
+ // status.
+ //
+ // * If requesting an object from a destination bucket — Amazon S3 will
+ // return the x-amz-replication-status header with value REPLICA if the object
+ // in your request is a replica that Amazon S3 created and there is no replica
+ // modification replication in progress.
+ //
+ // * When replicating objects to multiple destination buckets, the x-amz-replication-status
+ // header acts differently. The header of the source object will only return
+ // a value of COMPLETED when replication is successful to all destinations.
+ // The header will remain at value PENDING until replication has completed
+ // for all destinations. If one or more destinations fail replication, the
+ // header will return FAILED.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+ // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html))
+ // or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value ongoing-request="true".
+ //
+ // For more information about archiving objects, see Transitioning Objects:
+ // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
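+ //
+ // The value is typically the full key ARN, for example (illustrative value only):
+ //
+ // arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab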
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetArchiveStatus sets the ArchiveStatus field's value. +func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. 
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Container for the Suffix element. 
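+//
+// A minimal usage sketch (the svc client and bucket name are hypothetical),
+// setting this suffix as the index document of a bucket website configuration:
+//
+// _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+// Bucket: aws.String("examplebucket"),
+// WebsiteConfiguration: &s3.WebsiteConfiguration{
+// IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+// },
+// })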
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (for example, if the suffix is index.html and you make a request
+ // to samplebucket/images/, the data that is returned will be for the object
+ // with the key name images/index.html). The suffix must not be empty and must
+ // not include a slash character.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Suffix is a required field
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+ if s.Suffix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Suffix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+ s.Suffix = &v
+ return s
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Principal.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If
+ // the principal is an IAM User, it provides a user ARN value.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Initiator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Initiator) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+ s.ID = &v
+ return s
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput `type:"structure"`
+
+ // Specifies the object's compression format. Valid values: NONE, GZIP, BZIP2.
+ // Default value: NONE.
+ CompressionType *string `type:"string" enum:"CompressionType"`
+
+ // Specifies JSON as the object's input serialization format.
+ JSON *JSONInput `type:"structure"`
+
+ // Specifies Parquet as the object's input serialization format.
+ Parquet *ParquetInput `type:"structure"`
+}
+
+// String returns the string representation
+func (s InputSerialization) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputSerialization) GoString() string {
+ return s.String()
+}
+
+// SetCSV sets the CSV field's value.
+func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization {
+ s.CSV = v
+ return s
+}
+
+// SetCompressionType sets the CompressionType field's value.
+func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. +func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. 
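+ // Each Tiering pairs an access tier (for example, ARCHIVE_ACCESS or
+ // DEEP_ARCHIVE_ACCESS) with the number of days after which objects are moved
+ // to that tier.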
+ // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. +type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container of a key value name pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The account ID that owns the destination S3 bucket. 
If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + AccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InventorySchedule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
+ if s.Frequency == nil {
+ invalidParams.Add(request.NewErrParamRequired("Frequency"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFrequency sets the Frequency field's value.
+func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
+ s.Frequency = &v
+ return s
+}
+
+// Specifies JSON as the object's input serialization format.
+type JSONInput struct {
+ _ struct{} `type:"structure"`
+
+ // The type of JSON. Valid values: Document, Lines.
+ Type *string `type:"string" enum:"JSONType"`
+}
+
+// String returns the string representation
+func (s JSONInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JSONInput) GoString() string {
+ return s.String()
+}
+
+// SetType sets the Type field's value.
+func (s *JSONInput) SetType(v string) *JSONInput {
+ s.Type = &v
+ return s
+}
+
+// Specifies JSON as the request's output serialization format.
+type JSONOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The value used to separate individual records in the output. If no value
+ // is specified, Amazon S3 uses a newline character ('\n').
+ RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JSONOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JSONOutput) GoString() string {
+ return s.String()
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput {
+ s.RecordDelimiter = &v
+ return s
+}
+
+// A container for object key name prefix and suffix filtering rules.
+type KeyFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A list of containers for the key-value pair that defines the criteria for
+ // the filter rule.
+ FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s KeyFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyFilter) GoString() string {
+ return s.String()
+}
+
+// SetFilterRules sets the FilterRules field's value.
+func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
+ s.FilterRules = v
+ return s
+}
+
+// A container for specifying the configuration for AWS Lambda notifications.
+type LambdaFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
+ // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
+ // invokes when the specified event type occurs.
+ //
+ // LambdaFunctionArn is a required field
+ LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LambdaFunctionConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.LambdaFunctionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
+func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
+ s.LambdaFunctionArn = &v
+ return s
+}
+
+// Container for lifecycle rules. You can add as many as 1000 rules.
+type LifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies lifecycle configuration rules for an Amazon S3 bucket.
+ //
+ // Rules is a required field
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Container for the expiration for the lifecycle of the object.
+type LifecycleExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the date on which the object is to be moved or deleted. The date
+ // should be in GMT ISO 8601 format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+ // versions. If set to true, the delete marker will be expired; if set to false,
+ // the policy takes no action. This cannot be specified with Days or Date in
+ // a Lifecycle Expiration Policy.
+ ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s LifecycleExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleExpiration) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+ s.Days = &v
+ return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+ s.ExpiredObjectDeleteMarker = &v
+ return s
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+type LifecycleRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an incomplete multipart upload
+ // that Amazon S3 will wait before permanently removing all parts of the upload.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon Simple Storage Service Developer Guide.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ // Specifies the expiration for the lifecycle of the object in the form of date,
+ // days, and whether the object has a delete marker.
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // The Filter is used to identify objects that a Lifecycle Rule applies to.
+ // A Filter must have exactly one of Prefix, Tag, or And specified.
+ Filter *LifecycleRuleFilter `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ // Specifies the transition rule for the lifecycle rule that describes when
+ // noncurrent objects transition to a specific storage class. If your bucket
+ // is versioning-enabled (or versioning is suspended), you can set this action
+ // to request that Amazon S3 transition noncurrent object versions to a specific
+ // storage class at a set period in the object's lifetime.
+ NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
+
+ // Prefix identifying one or more objects to which the rule applies. This is
+ // no longer used; use Filter instead.
+ // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. 
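+ // For example (illustrative), a Prefix of "logs/" limits the rule to objects
+ // whose keys begin with logs/.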
+ Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
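+//
+// Illustrative in-package sketch (the ARN and name are hypothetical; the
+// method is unexported, so this is internal behavior): the value receiver
+// means the caller's input survives unchanged while a modified copy is
+// returned.
+//
+//	in := &ListBucketAnalyticsConfigurationsInput{Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/example")}
+//	copied, err := in.updateArnableField("example")
+//	// err is nil; in.Bucket still holds the ARN, while the returned copy's
+//	// Bucket holds "example".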
+func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
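+//
+// Illustrative sketch (bucket name hypothetical): the SDK also runs Validate
+// when the request is sent, but calling it up front surfaces missing
+// parameters early.
+//
+//	in := &s3.ListBucketIntelligentTieringConfigurationsInput{Bucket: aws.String("example-bucket")}
+//	if err := in.Validate(); err != nil {
+//		// err is a request.ErrInvalidParams describing the bad fields
+//	}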
+func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. 
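+//
+// Illustrative pagination sketch (svc is an assumed *s3.S3 client and the
+// bucket name is hypothetical); no Pages helper appears to be generated for
+// this operation, so the continuation token is threaded by hand:
+//
+//	var token *string
+//	for {
+//		out, err := svc.ListBucketIntelligentTieringConfigurations(&s3.ListBucketIntelligentTieringConfigurationsInput{
+//			Bucket:            aws.String("example-bucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil || !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}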
+func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
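+//
+// Illustrative sketch (bucket and account ID hypothetical): pinning
+// ExpectedBucketOwner makes the call fail with HTTP 403 rather than listing
+// a bucket that has changed hands.
+//
+//	in := (&s3.ListBucketInventoryConfigurationsInput{}).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("111122223333")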
+func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. 
+ ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requestor. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // The owner of the buckets listed. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. 
You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. 
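+//
+// Illustrative sketch (svc is an assumed *s3.S3 client; names hypothetical):
+// the generated ListMultipartUploadsPages helper follows NextKeyMarker and
+// NextUploadIdMarker automatically, handing each page to the callback.
+//
+//	_ = svc.ListMultipartUploadsPages(
+//		&s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")},
+//		func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//			for _, u := range page.Uploads {
+//				fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//			}
+//			return true // keep paging
+//		})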
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + + // The bucket name that contains the objects. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. If additional keys satisfy the search criteria, + // but were not returned because max-keys was exceeded, the response contains + // <isTruncated>true</isTruncated>. To return the additional keys, see key-marker + // and version-id-marker. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value.
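+//
+// Illustrative sketch (svc is an assumed *s3.S3 client; names hypothetical):
+// the generated ListObjectVersionsPages helper threads NextKeyMarker and
+// NextVersionIdMarker for you.
+//
+//	_ = svc.ListObjectVersionsPages(
+//		&s3.ListObjectVersionsInput{Bucket: aws.String("example-bucket"), Prefix: aws.String("logs/")},
+//		func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+//			for _, ver := range page.Versions {
+//				fmt.Println(aws.StringValue(ver.Key), aws.StringValue(ver.VersionId))
+//			}
+//			return true
+//		})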
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last key returned in a truncated response. + KeyMarker *string `type:"string"` + + // Specifies the maximum number of objects to return. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. + NextVersionIdMarker *string `type:"string"` + + // Selects objects that start with the value supplied by this parameter. 
+ Prefix *string `type:"string"` + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string `type:"string"` + + // Container for version information. + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // The name of the bucket containing the objects. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
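+//
+// Illustrative sketch (svc is an assumed *s3.S3 client; bucket and prefix
+// hypothetical): grouping keys under a delimiter while the generated
+// ListObjectsPages helper follows Marker/NextMarker.
+//
+//	_ = svc.ListObjectsPages(
+//		&s3.ListObjectsInput{
+//			Bucket:    aws.String("example-bucket"),
+//			Prefix:    aws.String("notes/"),
+//			Delimiter: aws.String("/"),
+//		},
+//		func(page *s3.ListObjectsOutput, lastPage bool) bool {
+//			for _, cp := range page.CommonPrefixes {
+//				fmt.Println(aws.StringValue(cp.Prefix))
+//			}
+//			return true
+//		})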
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. 
+ Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + Marker *string `type:"string"` + + // The maximum number of keys returned in the response body. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMarker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker *string `type:"string"` + + // Keys that begin with the indicated prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +type ListObjectsV2Input struct { + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + + // Bucket name to list. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // When using this API with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this operation with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied)
+ // error.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The owner field is not present in ListObjectsV2 results by default. If you
+ // want to return the owner field with each key in the result, set the fetch
+ // owner field to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // API returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the list
+ // objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
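+//
+// Illustrative pagination sketch (not part of the generated API surface):
+// draining a bucket with ListObjectsV2 by feeding NextContinuationToken back
+// into ContinuationToken. Bucket name, session setup, and error handling are
+// assumptions for the example only.
+//
+//    sess := session.Must(session.NewSession())
+//    svc := s3.New(sess)
+//    in := &s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")}
+//    for {
+//        out, err := svc.ListObjectsV2(in)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, obj := range out.Contents {
+//            fmt.Println(aws.StringValue(obj.Key))
+//        }
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        in.ContinuationToken = out.NextContinuationToken
+//    }
+//
+// The same loop can be written with the SDK's ListObjectsV2Pages helper,
+// which handles the token plumbing internally.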
+func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up into a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by a delimiter. 
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ ContinuationToken *string `type:"string"`
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Set to false if all of the results were returned. Set to true if more keys
+ // are available to return. If the number of results exceeds that specified
+ // by MaxKeys, all of the results might not be returned.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include 50 keys or fewer.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // API returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // The bucket name.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // When using this API with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this operation with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed.
The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Keys that begin with the indicated prefix. + Prefix *string `type:"string"` + + // If StartAfter was sent with the request, it is included in the response. + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // The name of the bucket to which the parts are being uploaded. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this operation with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied)
+ // error.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListPartsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListPartsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. 
+ IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. + Owner *Owner `type:"structure"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + PartNumberMarker *int64 `type:"integer"` + + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. 
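+//
+// Illustrative sketch (not part of the generated API surface): listing all
+// parts of an in-progress multipart upload with the ListPartsPages helper,
+// which follows NextPartNumberMarker across truncated responses. The bucket,
+// key, and upload ID are assumptions for the example only.
+//
+//    err := svc.ListPartsPages(&s3.ListPartsInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("example-object"),
+//        UploadId: aws.String("example-upload-id"),
+//    }, func(page *s3.ListPartsOutput, lastPage bool) bool {
+//        for _, p := range page.Parts {
+//            fmt.Println(aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size))
+//        }
+//        return true // keep paging
+//    })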
+func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Contains the type of server-side encryption used. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. 
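+//
+// Illustrative sketch (not part of the generated API surface): wiring a
+// Location into a restore request so results land in a bucket and prefix of
+// your choosing. This shows only the OutputLocation plumbing, not a complete
+// restore request; all names are assumptions for the example only.
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-archived-object"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            OutputLocation: &s3.OutputLocation{
+//                S3: &s3.Location{
+//                    BucketName: aws.String("example-results-bucket"),
+//                    Prefix:     aws.String("restore-results/"),
+//                },
+//            },
+//        },
+//    })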
+func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case, you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + // Container for granting information. + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. 
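+//
+// Illustrative sketch (not part of the generated API surface): enabling
+// server access logging with a per-source TargetPrefix so log files from
+// several buckets can share one target bucket, as the LoggingEnabled doc
+// above recommends. Bucket names are assumptions for the example only.
+//
+//    _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+//        Bucket: aws.String("example-source-bucket"),
+//        BucketLoggingStatus: &s3.BucketLoggingStatus{
+//            LoggingEnabled: &s3.LoggingEnabled{
+//                TargetBucket: aws.String("example-log-bucket"),
+//                TargetPrefix: aws.String("logs/example-source-bucket/"),
+//            },
+//        },
+//    })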
+func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + // Name of the Object. + Name *string `type:"string"` + + // Value of the Object. + Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + EventThreshold *ReplicationTimeValue `type:"structure"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. 
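+ //
+ // Illustrative sketch (not part of the generated API surface): a metrics
+ // configuration whose filter is an AND of a prefix and a tag, satisfying
+ // the two-predicate minimum described above. The bucket, ID, and tag are
+ // assumptions for the example only.
+ //
+ //    _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+ //        Bucket: aws.String("example-bucket"),
+ //        Id:     aws.String("docs-requests"),
+ //        MetricsConfiguration: &s3.MetricsConfiguration{
+ //            Id: aws.String("docs-requests"),
+ //            Filter: &s3.MetricsFilter{
+ //                And: &s3.MetricsAndOperator{
+ //                    Prefix: aws.String("documents/"),
+ //                    Tags:   []*s3.Tag{{Key: aws.String("class"), Value: aws.String("blue")}},
+ //                },
+ //            },
+ //        },
+ //    })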
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. 
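+//
+// Illustrative sketch (not part of the generated API surface): the
+// noncurrent-version lifecycle types defined below are typically attached to
+// a bucket lifecycle rule, e.g. expiring old versions after 90 days. The
+// bucket and rule names are assumptions for the example only.
+//
+//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("example-versioned-bucket"),
+//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//            Rules: []*s3.LifecycleRule{{
+//                ID:     aws.String("expire-noncurrent"),
+//                Status: aws.String("Enabled"),
+//                Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
+//                NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
+//                    NoncurrentDays: aws.Int64(90),
+//                },
+//            }},
+//        },
+//    })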
+type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// is suspended), you can set this action to request that Amazon S3 transition +// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's +// lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. 
+ QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Container for specifying the AWS Lambda notification configuration. + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. 
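+ //
+ // Illustrative sketch (not part of the generated API surface): applying the
+ // non-deprecated NotificationConfiguration defined above so object-created
+ // events are published to an SQS queue. The queue ARN and bucket name are
+ // assumptions for the example only.
+ //
+ //    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+ //        Bucket: aws.String("example-bucket"),
+ //        NotificationConfiguration: &s3.NotificationConfiguration{
+ //            QueueConfigurations: []*s3.QueueConfiguration{{
+ //                QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
+ //                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+ //            }},
+ //        },
+ //    })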
+ TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // A container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// An object consists of data and its descriptive metadata. +type Object struct { + _ struct{} `type:"structure"` + + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, + // have ETags that are an MD5 digest of their object data. + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, + // have ETags that are not an MD5 digest of their object data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. + ETag *string `type:"string"` + + // The name that you assign to an object. You use the object key to retrieve + // the object. + Key *string `min:"1" type:"string"` + + // Creation date of the object. + LastModified *time.Time `type:"timestamp"` + + // The owner of the object + Owner *Owner `type:"structure"` + + // Size in bytes of the object + Size *int64 `type:"integer"` + + // The class of storage used to store the object. 
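+ //
+ // Illustrative sketch (not part of the generated API surface): per the ETag
+ // rules above, a single-part object encrypted with SSE-S3 or plaintext can
+ // be integrity-checked by comparing a local MD5 digest against the quoted
+ // ETag. The localData bytes and obj value are assumptions for the example.
+ //
+ //    sum := md5.Sum(localData) // import "crypto/md5"
+ //    local := fmt.Sprintf("%x", sum)
+ //    remote := strings.Trim(aws.StringValue(obj.ETag), "\"")
+ //    if local != remote {
+ //        log.Println("ETag mismatch (object may be multipart or SSE-C/KMS)")
+ //    }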
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Object Identifier is unique value to identify objects. +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// The container element for Object Lock configuration parameters. +type ObjectLockConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether this bucket has an Object Lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` + + // The Object Lock rule in place for the specified object. + Rule *ObjectLockRule `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} + +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. 
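+//
+// Illustrative sketch (not part of the generated API surface): setting a
+// default 30-day GOVERNANCE retention rule on a bucket that was created with
+// Object Lock enabled. The bucket name is an assumption for the example only.
+//
+//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//        Bucket: aws.String("example-locked-bucket"),
+//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//            ObjectLockEnabled: aws.String("Enabled"),
+//            Rule: &s3.ObjectLockRule{
+//                DefaultRetention: &s3.DefaultRetention{
+//                    Mode: aws.String("GOVERNANCE"),
+//                    Days: aws.Int64(30),
+//                },
+//            },
+//        },
+//    })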
+func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a Legal Hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +// The version of an object. +type ObjectVersion struct { + _ struct{} `type:"structure"` + + // The entity tag is an MD5 hash of that version of the object. + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + // Specifies the owner of the object. + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +// Container for the owner's display name and ID. +type Owner struct { + _ struct{} `type:"structure"` + + // Container for the display name of the owner. 
+ DisplayName *string `type:"string"` + + // Container for the ID of the owner. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + _ struct{} `type:"structure"` + + // The container element for an ownership control rule. + // + // Rules is a required field + Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s OwnershipControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OwnershipControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { + s.Rules = v + return s +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + _ struct{} `type:"structure"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // ObjectOwnership is a required field + ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` +} + +// String returns the string representation +func (s OwnershipControlsRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OwnershipControlsRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControlsRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} + if s.ObjectOwnership == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { + s.ObjectOwnership = &v + return s +} + +// Container for Parquet. 
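OwnershipControls is just this one-rule list, so callers construct and validate it the same way as other inputs. A minimal sketch, assuming the BucketOwnerPreferred enum value described in the comment above; the PutBucketOwnershipControls operation that consumes it lives elsewhere in the SDK:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// A single rule: objects uploaded with the bucket-owner-full-control
	// canned ACL change ownership to the bucket owner.
	controls := &s3.OwnershipControls{
		Rules: []*s3.OwnershipControlsRule{
			{ObjectOwnership: aws.String("BucketOwnerPreferred")},
		},
	}
	if err := controls.Validate(); err != nil {
		fmt.Println("invalid ownership controls:", err)
	}
}
```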
+type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParquetInput) GoString() string { + return s.String() +} + +// Container for elements related to a part. +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +// This data type contains information about progress of an operation. +type Progress struct { + _ struct{} `type:"structure"` + + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +// This data type contains information about the progress event of an operation. 
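The Progress counters above reach callers through the ProgressEvent type defined next, while a SelectObjectContent call streams results. A sketch of draining that stream, assuming the SelectObjectContentOutput, its EventStream reader, and EndEvent defined elsewhere in this file:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// drainSelectEvents reports scan progress from a SelectObjectContent call.
// out is assumed to have been returned by svc.SelectObjectContent.
func drainSelectEvents(out *s3.SelectObjectContentOutput) {
	defer out.EventStream.Close()
	for ev := range out.EventStream.Events() {
		switch e := ev.(type) {
		case *s3.ProgressEvent:
			if d := e.Details; d != nil {
				fmt.Printf("scanned=%d processed=%d returned=%d bytes\n",
					aws.Int64Value(d.BytesScanned),
					aws.Int64Value(d.BytesProcessed),
					aws.Int64Value(d.BytesReturned))
			}
		case *s3.EndEvent:
			fmt.Println("select complete")
		}
	}
}

func main() {}
```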
+type ProgressEvent struct {
+ _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
+
+ // The Progress event details.
+ Details *Progress `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s ProgressEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProgressEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
+ s.Details = v
+ return s
+}
+
+// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon
+// S3 bucket. You can enable the configuration options in any combination. For
+// more information about when Amazon S3 considers a bucket or object public,
+// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon Simple Storage Service Developer Guide.
+type PublicAccessBlockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs)
+ // for this bucket and objects in this bucket. Setting this element to TRUE
+ // causes the following behavior:
+ //
+ // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
+ // public.
+ //
+ // * PUT Object calls fail if the request includes a public ACL.
+ //
+ // * PUT Bucket calls fail if the request includes a public ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should block public bucket policies for this
+ // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
+ // PUT Bucket policy if the specified bucket policy allows public access.
+ //
+ // Enabling this setting doesn't affect existing bucket policies.
+ BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"`
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to
+ // ignore all public ACLs on this bucket and objects in this bucket.
+ // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS service principals and authorized users within this account if the bucket + // has a public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` +} + +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} + +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` + + // Container for setting the transfer acceleration state. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
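The four flags are independent booleans, so enabling full protection means setting all of them. A minimal sketch, assuming the PutPublicAccessBlock operation defined elsewhere in this file and a hypothetical bucket name:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Turn on all four protections; each flag can also be set on its own.
	cfg := (&s3.PublicAccessBlockConfiguration{}).
		SetBlockPublicAcls(true).
		SetBlockPublicPolicy(true).
		SetIgnorePublicAcls(true).
		SetRestrictPublicBuckets(true)

	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket:                         aws.String("example-bucket"),
		PublicAccessBlockConfiguration: cfg,
	})
	_ = err // handle in real code
}
```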
+func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket to which to apply the ACL. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
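Every Put* input in this file carries the same x-amz-expected-bucket-owner header, and the documented failure mode is an HTTP 403. A sketch of setting it and detecting that failure, assuming awserr.RequestFailure from the SDK core and hypothetical account and bucket values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// If example-bucket is owned by an account other than 111122223333,
	// S3 rejects the call with 403 Access Denied.
	_, err := svc.PutBucketAcl((&s3.PutBucketAclInput{}).
		SetBucket("example-bucket").
		SetACL("private").
		SetExpectedBucketOwner("111122223333"))
	if rf, ok := err.(awserr.RequestFailure); ok && rf.StatusCode() == 403 {
		fmt.Println("bucket is not owned by the expected account")
	}
}
```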
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + + // Specifies the bucket impacted by the corsconfiguration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. 
For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
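For the CORS input above, a minimal sketch of a one-rule configuration, assuming the CORSConfiguration and CORSRule types defined elsewhere in this file and a hypothetical origin:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// One rule allowing browser GETs from a single origin.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"),
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: aws.StringSlice([]string{"GET"}),
				AllowedOrigins: aws.StringSlice([]string{"https://example.com"}),
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	_ = err // handle in real code
}
```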
+func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketEncryptionInput struct { + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the default server-side-encryption configuration. + // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. 
+func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
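For PutBucketEncryptionInput above, a minimal sketch of defaulting a bucket to SSE-S3, assuming the ServerSideEncryptionConfiguration, ServerSideEncryptionRule, and ServerSideEncryptionByDefault types defined elsewhere in this file:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Every new object without its own encryption headers gets AES256.
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String("AES256"),
				},
			}},
		},
	})
	_ = err // handle in real code
}
```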
+func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
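The hasEndpointARN and updateArnableField helpers repeated on each input exist so that an access point ARN can be passed where a bucket name is expected: the SDK detects the ARN, retargets the endpoint, and backfills the parsed resource name into a copy of the input. A sketch under those assumptions (whether a given bucket-level call is accepted on an access point is a service-side question, and the ARN and ACL values are hypothetical):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket carries an access point ARN instead of a bucket name; the SDK's
	// ARN handling (hasEndpointARN/updateArnableField) resolves the request
	// against the access point endpoint without mutating this input value.
	_, err := svc.PutBucketAcl((&s3.PutBucketAclInput{}).
		SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap").
		SetACL("private"))
	_ = err // handle in real code
}
```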
+func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + + // The name of the bucket for which to set the configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1,000 rules. + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1000 rules. + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
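PutBucketLifecycleConfigurationInput above is the current way to set lifecycle rules (the PutBucketLifecycleInput below is the legacy shape). A minimal sketch of a single expiration rule, assuming the LifecycleRule, LifecycleRuleFilter, and LifecycleExpiration types defined elsewhere in this file:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Expire objects under logs/ after 30 days; a configuration may hold
	// as many as 1,000 such rules.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Status:     aws.String("Enabled"),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	_ = err // handle in real code
}
```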
+func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // The name of the bucket for which to set the logging parameters. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for logging status information. + // + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. + // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
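These unexported `getEndpointARN`/`hasEndpointARN`/`updateArnableField` hooks are what let callers pass an S3 access point ARN in the `Bucket` member and have the SDK reroute the request to the access point endpoint. The detection itself is just the exported `arn.IsARN` check; a sketch with a made-up ARN:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// The same test the generated hasEndpointARN methods run on Bucket.
	bucket := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"
	fmt.Println(arn.IsARN(bucket)) // true: the SDK resolves the access point endpoint
}
```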
+func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The container for the configuration. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
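`PutBucketNotificationInput` above feeds the legacy notification API (note its `NotificationConfigurationDeprecated` payload); new code should generally use `PutBucketNotificationConfiguration` instead. A sketch, assuming a configured session and an existing SQS queue ARN:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}
```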
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketOwnershipControlsInput struct { + _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want + // to apply to this Amazon S3 bucket. + // + // OwnershipControls is a required field + OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.OwnershipControls == nil { + invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) + } + if s.OwnershipControls != nil { + if err := s.OwnershipControls.Validate(); err != nil { + invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { + s.OwnershipControls = v + return s +} + +func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
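Using `PutBucketPolicyInput` is mostly a matter of supplying the policy JSON; the bucket name, policy document, and session below are placeholders. `ConfirmRemoveSelfBucketAccess` only needs to be set when the new policy would lock the caller out of changing the policy later.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

const policyJSON = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": "*",
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::example-bucket/*"
  }]
}`

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"),
		Policy: aws.String(policyJSON),
	})
	if err != nil {
		panic(err)
	}
}
```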
+func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + + // The name of the bucket + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token to allow Object Lock to be enabled for an existing bucket. 
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + + // The bucket name. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for Payer. + // + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
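That guarantee comes from the value receiver: `updateArnableField` reassigns a pointer field on its own copy of the input and returns a pointer to that copy, leaving the caller's struct untouched. A self-contained sketch of the same pattern with a hypothetical type (not part of the SDK):

```go
package main

import "fmt"

type input struct{ Bucket *string }

// Mirrors the generated updateArnableField shape: value receiver, reassign
// the pointer field on the copy, return the modified copy.
func (s input) updateBucket(v string) *input {
	s.Bucket = &v
	return &s
}

func main() {
	orig := "arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint"
	in := input{Bucket: &orig}
	out := in.updateBucket("myendpoint")
	fmt.Println(*in.Bucket)  // original ARN, unchanged
	fmt.Println(*out.Bucket) // "myendpoint", only on the copy
}
```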
+func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the TagSet and Tag elements. + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Container for setting the versioning state. + // + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. 
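A sketch of enabling versioning with the type above, assuming a configured session. Per the `MFA` field's doc comment, when the bucket requires it the header value is the device serial number and the current code separated by a single space.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"),
		// Only needed when the bucket's MFA Delete setting demands it:
		// MFA: aws.String("arn:aws:iam::123456789012:mfa/user 123456"),
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		panic(err)
	}
}
```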
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the request. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
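For completeness, a sketch of the website input defined above; the index/error document names and bucket are placeholders, and the bucket must already allow public website access for the configuration to be useful.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"),
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	if err != nil {
		panic(err)
	}
}
```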
+func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. 
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key for which the PUT operation was initiated.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // When using this API with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this operation with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
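Before the remaining plumbing, here is how the pieces of `PutObjectAclInput` come together in practice. The service expects one grant mechanism per request: a canned ACL header, explicit grant headers, or an `AccessControlPolicy` body. A sketch using the canned form (bucket, key, and session are placeholders):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("reports/2021/summary.pdf"),
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
	})
	if err != nil {
		panic(err)
	}
}
```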
+func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectInput struct {
+ _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // The bucket name to which the PUT operation was initiated.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // When using this API with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this operation with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using AWS KMS (SSE-KMS). Setting this header
+ // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
+ // SSE-KMS.
+ //
+ // Specifying this header with a PUT operation doesn’t affect bucket-level
+ // settings for S3 Bucket Key.
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+ // Can be used to specify caching behavior along the request/reply chain. For
+ // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. 
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT operation was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether a legal hold will be applied to this object. For more information
+ // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode that you want to apply to this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want this object's Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the symmetric customer managed AWS Key
+ // Management Service (AWS KMS) customer master key (CMK) that will be used
+ // for the object. If you specify x-amz-server-side-encryption:aws:kms but do
+ // not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the
+ // AWS managed CMK in AWS to protect the data.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata. For information about object
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ //
+ // In the following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket:
+ //
+ // x-amz-website-redirect-location: /anotherPage.html
+ //
+ // In the following example, the request header sets the object redirect to
+ // another website:
+ //
+ // x-amz-website-redirect-location: http://www.example.com/
+ //
+ // For more information about website hosting in Amazon S3, see Hosting Websites
+ // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
+ // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
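+//
+// A minimal usage sketch (svc is an assumed *s3.S3 client; the bucket, key,
+// and body values are placeholders, not taken from this file):
+//
+//    input := &s3.PutObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("docs/report.txt"),
+//        Body:   bytes.NewReader([]byte("hello world")),
+//    }
+//    if err := input.Validate(); err == nil {
+//        _, err = svc.PutObject(input)
+//    }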
+func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. 
+func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
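+//
+// For illustration (hypothetical values): if Bucket currently holds an access
+// point ARN, a caller could backfill the extracted resource name like so:
+//
+//    out, err := input.updateArnableField("my-access-point")
+//    // on success, out.(*PutObjectInput).Bucket points at "my-access-point",
+//    // while input.Bucket is left untouched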
+func (s PutObjectInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutObjectLegalHoldInput struct {
+ _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`
+
+ // The bucket name containing the object that you want to place a Legal Hold
+ // on.
+ //
+ // When using this API with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this operation with an access point through the AWS SDKs, you
+ // provide the access point ARN in place of the bucket name. For more information
+ // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied)
+ // error.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The key name for the object that you want to place a Legal Hold on.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Container element for the Legal Hold configuration you want to apply to the
+ // specified object.
+ LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The version ID of the object that you want to place a Legal Hold on.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
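+//
+// Because the setters on this type return the receiver, a request can be built
+// by chaining them; a hedged sketch with placeholder values:
+//
+//    hold := &s3.ObjectLockLegalHold{Status: aws.String(s3.ObjectLockLegalHoldStatusOn)}
+//    input := (&s3.PutObjectLegalHoldInput{}).SetBucket("my-bucket").SetKey("my-key").SetLegalHold(hold)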
+func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} + +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose Object Lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The Object Lock configuration that you want to apply to the specified bucket. 
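+ //
+ // A hedged construction sketch (the retention mode and period shown are
+ // illustrative placeholders only):
+ //
+ //    input.ObjectLockConfiguration = &s3.ObjectLockConfiguration{
+ //        ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
+ //        Rule: &s3.ObjectLockRule{
+ //            DefaultRetention: &s3.DefaultRetention{
+ //                Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
+ //                Days: aws.Int64(30),
+ //            },
+ //        },
+ //    }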
+ ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // A token to allow Object Lock to be enabled for an existing bucket.
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectLockConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectLockConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectLockConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectLockConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value.
+func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput {
+ s.ObjectLockConfiguration = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetToken sets the Token field's value.
+func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput {
+ s.Token = &v
+ return s
+}
+
+func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v + return s +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. 
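+ //
+ // For illustration (not from the generated docs; svc is an assumed *s3.S3
+ // client), a caller might inspect this field after a successful upload:
+ //
+ //    out, err := svc.PutObject(input)
+ //    if err == nil && out.SSEKMSKeyId != nil {
+ //        log.Println("object encrypted with KMS key", *out.SSEKMSKeyId)
+ //    }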
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
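+ //
+ // For illustration only, a hypothetical access point ARN has the form
+ // arn:aws:s3:<region>:<account-id>:accesspoint/<access-point-name>.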
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates whether this operation should bypass Governance-mode restrictions.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied)
+ // error.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The key name for the object that you want to apply this Object Retention
+ // configuration to.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The container element for the Object Retention configuration.
+ Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The version ID for the object that you want to apply this Object Retention
+ // configuration to.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectRetentionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectRetentionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectRetentionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectRetentionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value.
+func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput {
+ s.BypassGovernanceRetention = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
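+//
+// A hedged sketch of building a retention request with chained setters (all
+// values below are placeholders):
+//
+//    ret := &s3.ObjectLockRetention{
+//        Mode:            aws.String(s3.ObjectLockRetentionModeCompliance),
+//        RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//    }
+//    input := (&s3.PutObjectRetentionInput{}).SetBucket("my-bucket").SetKey("my-key").SetRetention(ret)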
+func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. 
For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The account id of the expected bucket owner. If the bucket is owned by a
+ // different account, the request will fail with an HTTP 403 (Access Denied)
+ // error.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Name of the object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Container for the TagSet and Tag elements.
+ //
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The versionId of the object that the tag-set will be added to.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
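+//
+// A hedged sketch (the tag key and value are placeholders) of assembling the
+// required Tagging payload with this type's setters:
+//
+//    tags := &s3.Tagging{TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}}}
+//    input := (&s3.PutObjectTaggingInput{}).SetBucket("my-bucket").SetKey("my-key").SetTagging(tags)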
+func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was added to. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} + +type PutPublicAccessBlockInput struct { + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. 
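+ //
+ // A hedged sketch that blocks every form of public access (the field names
+ // are defined on PublicAccessBlockConfiguration elsewhere in this package):
+ //
+ //    input.PublicAccessBlockConfiguration = &s3.PublicAccessBlockConfiguration{
+ //        BlockPublicAcls:       aws.Bool(true),
+ //        BlockPublicPolicy:     aws.Bool(true),
+ //        IgnorePublicAcls:      aws.Bool(true),
+ //        RestrictPublicBuckets: aws.Bool(true),
+ //    }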
+ // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. 
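+//
+// A minimal sketch (the queue ARN and event name are placeholders, not taken
+// from this file):
+//
+//    qc := &s3.QueueConfiguration{
+//        QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:my-queue"),
+//        Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//    }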
+type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // A collection of bucket events for which to send notifications + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) +// for the same purposes. This data type specifies the configuration for publishing +// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon +// S3 detects specified events. +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of bucket events for which to send notifications + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. 
+ Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+ s.Queue = &v
+ return s
+}
+
+// The container for the records event.
+type RecordsEvent struct {
+ _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+ // The byte array containing one or more result records, possibly partial.
+ //
+ // Payload is automatically base64 encoded/decoded by the SDK.
+ Payload []byte `type:"blob"`
+}
+
+// String returns the string representation
+func (s RecordsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordsEvent) GoString() string {
+ return s.String()
+}
+
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+ s.Payload = v
+ return s
+}
+
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ s.Payload = make([]byte, len(msg.Payload))
+ copy(s.Payload, msg.Payload)
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream"))
+ msg.Payload = s.Payload
+ return msg, err
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use when redirecting requests. The default is the protocol that
+ // is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request.
For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. Can be present only if ReplaceKeyWith + // is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. 
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+ s.HostName = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+ s.Protocol = &v
+ return s
+}
+
+// A filter that you can specify to select modifications on replicas.
+// Amazon S3 doesn't replicate replica modifications by default. In the latest
+// version of replication configuration (when Filter is specified), you can
+// specify this element and set the status to Enabled to replicate modifications
+// on replicas.
+//
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, this element
+// is not allowed.
+type ReplicaModifications struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 replicates modifications on replicas.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"`
+}
+
+// String returns the string representation
+func (s ReplicaModifications) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaModifications) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaModifications) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications {
+ s.Status = &v
+ return s
+}
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the AWS Identity and Access Management
+ // (IAM) role that Amazon S3 assumes when replicating objects. For more information,
+ // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Role is a required field
+ Role *string `type:"string" required:"true"`
+
+ // A container for one or more replication rules. A replication configuration
+ // must have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // Rules is a required field
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
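+//
+// A hedged sketch of a configuration that would pass this validation (the
+// role and bucket ARNs are placeholders):
+//
+//    rc := &s3.ReplicationConfiguration{
+//        Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//        Rules: []*s3.ReplicationRule{{
+//            Status:      aws.String(s3.ReplicationRuleStatusEnabled),
+//            Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+//        }},
+//    }
+//    err := rc.Validate() // nil for the values above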
+func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Specifies which Amazon S3 objects to replicate and where to store the replicas. +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). + // + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` + + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 Developer Guide. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + Filter *ReplicationRuleFilter `type:"structure"` + + // A unique identifier for the rule. The maximum value is 255 characters. + ID *string `type:"string"` + + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. 
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // The priority indicates which rule has precedence whenever two or more replication + // rules conflict. Amazon S3 will attempt to replicate objects according to + // all replication rules. However, if there are two or more rules with the same + // destination bucket, then objects will be replicated according to the rule + // with the highest priority. The higher the number, the higher the priority. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon Simple Storage Service Developer Guide. + Priority *int64 `type:"integer"` + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. 
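+//
+// (Editorial aside, not generated code: an illustrative sketch assembling the
+// minimal ReplicationConfiguration described above. The IAM role and bucket
+// ARNs are placeholders; Destination and Status are the rule's required fields.)
+//
+//	cfg := &s3.ReplicationConfiguration{
+//	    Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//	    Rules: []*s3.ReplicationRule{{
+//	        Status:      aws.String(s3.ReplicationRuleStatusEnabled),
+//	        Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+//	    }},
+//	}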
+func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // An array of tags containing key and value pairs. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. 
+ // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. 
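+//
+// (Editorial aside, not generated code: an illustrative sketch of the
+// ReplicationRuleFilter wrapping rules described above. Because this filter
+// combines a prefix with a tag, both are wrapped in an And element; the
+// prefix, key, and value strings are placeholders.)
+//
+//	filter := &s3.ReplicationRuleFilter{
+//	    And: &s3.ReplicationRuleAndOperator{
+//	        Prefix: aws.String("logs/"),
+//	        Tags:   []*s3.Tag{{Key: aws.String("team"), Value: aws.String("infra")}},
+//	    },
+//	}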
+func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid values: 15 minutes. + Minutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + + // The bucket name containing the object to restore. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Object key for which the operation was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Container for restore job parameters.
+	RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.RestoreRequest != nil {
+		if err := s.RestoreRequest.Validate(); err != nil {
+			invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *RestoreObjectInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
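+//
+// (Editorial aside, not generated code: an illustrative sketch of a
+// RestoreObject call that restores an archived object for ten days at the
+// Standard retrieval tier. The bucket, key, and svc client are placeholders,
+// and error handling is elided.)
+//
+//	out, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//	    Bucket: aws.String("example-bucket"),
+//	    Key:    aws.String("archive/report.csv"),
+//	    RestoreRequest: &s3.RestoreRequest{
+//	        Days:                 aws.Int64(10),
+//	        GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String(s3.TierStandard)},
+//	    },
+//	})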
+func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. 
+ SelectParameters *SelectParameters `type:"structure"` + + // Retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon Simple Storage Service Developer Guide. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. 
+ // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. For examples, see Put +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object. + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // is suspended), you can set this action to request that Amazon S3 transition + // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's + // lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. 
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon Simple Storage Service Developer Guide. + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer + // managed customer master key (CMK) to use for encrypting inventory reports. 
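	//
	// (Editorial aside, not generated text: an illustrative sketch of the
	// lifecycle Rule type defined above, with a placeholder prefix. Prefix and
	// Status are the fields its Validate method requires.)
	//
	//	rule := &s3.Rule{
	//	    Prefix:     aws.String("tmp/"),
	//	    Status:     aws.String(s3.ExpirationStatusEnabled),
	//	    Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
	//	}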
+	//
+	// KeyId is a required field
+	KeyId *string `type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s SSEKMS) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSEKMS) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SSEKMS) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SSEKMS"}
+	if s.KeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *SSEKMS) SetKeyId(v string) *SSEKMS {
+	s.KeyId = &v
+	return s
+}
+
+// Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+type SSES3 struct {
+	_ struct{} `locationName:"SSE-S3" type:"structure"`
+}
+
+// String returns the string representation
+func (s SSES3) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSES3) GoString() string {
+	return s.String()
+}
+
+// Specifies the byte range of the object to get the records from. A record
+// is processed when its first byte is contained by the range. This parameter
+// is optional, but when specified, it must not be empty. See RFC 2616, Section
+// 14.35.1 about how to specify the start and end of the range.
+type ScanRange struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the end of the byte range. This parameter is optional. Valid values:
+	// non-negative integers. The default value is one less than the size of the
+	// object being queried. If only the End parameter is supplied, it is interpreted
+	// to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange>
+	// means scan the last 50 bytes.
+	End *int64 `type:"long"`
+
+	// Specifies the start of the byte range. This parameter is optional. Valid
+	// values: non-negative integers. The default value is 0. If only start is supplied,
+	// it means scan from that point to the end of the file. For example,
+	// <scanrange><start>50</start></scanrange> means scan from byte 50 until the
+	// end of the file.
+	Start *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s ScanRange) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanRange) GoString() string {
+	return s.String()
+}
+
+// SetEnd sets the End field's value.
+func (s *ScanRange) SetEnd(v int64) *ScanRange {
+	s.End = &v
+	return s
+}
+
+// SetStart sets the Start field's value.
+func (s *ScanRange) SetStart(v int64) *ScanRange {
+	s.Start = &v
+	return s
+}
+
+// SelectObjectContentEventStreamEvent groups together all EventStream
+// event writes for SelectObjectContentEventStream.
+//
+// These events are:
+//
+//    * ContinuationEvent
+//    * EndEvent
+//    * ProgressEvent
+//    * RecordsEvent
+//    * StatsEvent
+type SelectObjectContentEventStreamEvent interface {
+	eventSelectObjectContentEventStream()
+	eventstreamapi.Marshaler
+	eventstreamapi.Unmarshaler
+}
+
+// SelectObjectContentEventStreamReader provides the interface for reading from the stream. The
+// default implementation for this interface will be SelectObjectContentEventStreamData.
+//
+// The reader's Close method must allow multiple concurrent calls.
+//
+// These events are:
+//
+//    * ContinuationEvent
+//    * EndEvent
+//    * ProgressEvent
+//    * RecordsEvent
+//    * StatsEvent
+//    * SelectObjectContentEventStreamUnknownEvent
+type SelectObjectContentEventStreamReader interface {
+	// Returns a channel of events as they are read from the event stream.
+	Events() <-chan SelectObjectContentEventStreamEvent
+
+	// Close will stop the reader reading events from the stream.
+	Close() error
+
+	// Returns any error that has occurred while reading from the event stream.
+	Err() error
+}
+
+type readSelectObjectContentEventStream struct {
+	eventReader *eventstreamapi.EventReader
+	stream      chan SelectObjectContentEventStreamEvent
+	err         *eventstreamapi.OnceError
+
+	done      chan struct{}
+	closeOnce sync.Once
+}
+
+func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream {
+	r := &readSelectObjectContentEventStream{
+		eventReader: eventReader,
+		stream:      make(chan SelectObjectContentEventStreamEvent),
+		done:        make(chan struct{}),
+		err:         eventstreamapi.NewOnceError(),
+	}
+	go r.readEventStream()
+
+	return r
+}
+
+// Close will close the underlying event stream reader.
+func (r *readSelectObjectContentEventStream) Close() error {
+	r.closeOnce.Do(r.safeClose)
+	return r.Err()
+}
+
+func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} {
+	return r.err.ErrorSet()
+}
+
+func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} {
+	return r.done
+}
+
+func (r *readSelectObjectContentEventStream) safeClose() {
+	close(r.done)
+}
+
+func (r *readSelectObjectContentEventStream) Err() error {
+	return r.err.Err()
+}
+
+func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+	return r.stream
+}
+
+func (r *readSelectObjectContentEventStream) readEventStream() {
+	defer r.Close()
+	defer close(r.stream)
+
+	for {
+		event, err := r.eventReader.ReadEvent()
+		if err != nil {
+			if err == io.EOF {
+				return
+			}
+			select {
+			case <-r.done:
+				// If closed already ignore the error
+				return
+			default:
+			}
+			if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok {
+				continue
+			}
+			r.err.SetError(err)
+			return
+		}
+
+		select {
+		case r.stream <- event.(SelectObjectContentEventStreamEvent):
+		case <-r.done:
+			return
+		}
+	}
+}
+
+type unmarshalerForSelectObjectContentEventStreamEvent struct {
+	metadata protocol.ResponseMetadata
+}
+
+func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
+	switch eventType {
+	case "Cont":
+		return &ContinuationEvent{}, nil
+	case "End":
+		return &EndEvent{}, nil
+	case "Progress":
+		return &ProgressEvent{}, nil
+	case "Records":
+		return &RecordsEvent{}, nil
+	case "Stats":
+		return &StatsEvent{}, nil
+	default:
+		return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil
+	}
+}
+
+// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the
+// SelectObjectContentEventStream group of events when an unknown event is received.
+type SelectObjectContentEventStreamUnknownEvent struct {
+	Type    string
+	Message eventstream.Message
+}
+
+// The SelectObjectContentEventStreamUnknownEvent is an event in the SelectObjectContentEventStream
+// group of events.
+func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) (
+	msg eventstream.Message, err error,
+) {
+	return e.Message.Clone(), nil
+}
+
+// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value.
+// This method is only used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	e.Message = msg.Clone()
+	return nil
+}
+
+// Request to filter the contents of an Amazon S3 object based on a simple Structured
+// Query Language (SQL) statement. In the request, along with the SQL expression,
+// you must specify a data serialization format (JSON or CSV) of the object.
+// Amazon S3 uses this to parse object data into records. It returns only records
+// that match the specified SQL expression. You must also specify the data serialization
+// format for the response. For more information, see S3Select API Documentation
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
+type SelectObjectContentInput struct {
+	_ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The S3 bucket.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The expression that is used to query the object.
+	//
+	// Expression is a required field
+	Expression *string `type:"string" required:"true"`
+
+	// The type of the provided expression (for example, SQL).
+	//
+	// ExpressionType is a required field
+	ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
+
+	// Describes the format of the data in the object that is being queried.
+	//
+	// InputSerialization is a required field
+	InputSerialization *InputSerialization `type:"structure" required:"true"`
+
+	// The object key.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Describes the format of the data that you want Amazon S3 to return in response.
+	//
+	// OutputSerialization is a required field
+	OutputSerialization *OutputSerialization `type:"structure" required:"true"`
+
+	// Specifies if periodic request progress information should be enabled.
+	RequestProgress *RequestProgress `type:"structure"`
+
+	// The SSE Algorithm used to encrypt the object. For more information, see Server-Side
+	// Encryption (Using Customer-Provided Encryption Keys)
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// The SSE Customer Key. For more information, see Server-Side Encryption
+	// (Using Customer-Provided Encryption Keys)
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// The SSE Customer Key MD5. For more information, see Server-Side Encryption
+	// (Using Customer-Provided Encryption Keys)
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the byte range of the object to get the records from. A record
+	// is processed when its first byte is contained by the range. This parameter
+	// is optional, but when specified, it must not be empty. See RFC 2616, Section
+	// 14.35.1 about how to specify the start and end of the range.
+	//
+	// ScanRange may be used in the following ways:
+	//
+	//    * <scanrange><start>50</start><end>100</end></scanrange> - process only
+	//    the records starting between the bytes 50 and 100 (inclusive, counting
+	//    from zero)
+	//
+	//    * <scanrange><start>50</start></scanrange> - process only the records
+	//    starting after the byte 50
+	//
+	//    * <scanrange><end>50</end></scanrange> - process only the records within
+	//    the last 50 bytes of the file.
+	ScanRange *ScanRange `type:"structure"`
+}
+
+// String returns the string representation
+func (s SelectObjectContentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SelectObjectContentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectObjectContentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Expression == nil {
+		invalidParams.Add(request.NewErrParamRequired("Expression"))
+	}
+	if s.ExpressionType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
+	}
+	if s.InputSerialization == nil {
+		invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.OutputSerialization == nil {
+		invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *SelectObjectContentInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetExpression sets the Expression field's value.
+func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput {
+	s.Expression = &v
+	return s
+}
+
+// SetExpressionType sets the ExpressionType field's value.
+func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput {
+	s.ExpressionType = &v
+	return s
+}
+
+// SetInputSerialization sets the InputSerialization field's value.
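+//
+// (Editorial aside, not generated code: an illustrative sketch of a
+// SelectObjectContent call that queries a CSV object and scans only its first
+// kilobyte. Bucket, key, and query are placeholders; svc is an *s3.S3 client,
+// and the returned event stream should be drained and closed by the caller.)
+//
+//	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//	    Bucket:              aws.String("example-bucket"),
+//	    Key:                 aws.String("data.csv"),
+//	    Expression:          aws.String("SELECT * FROM S3Object s"),
+//	    ExpressionType:      aws.String(s3.ExpressionTypeSql),
+//	    InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
+//	    OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//	    ScanRange:           &s3.ScanRange{Start: aws.Int64(0), End: aws.Int64(1023)},
+//	})
+//	if err == nil {
+//	    defer out.EventStream.Close()
+//	    for ev := range out.EventStream.Events() {
+//	        if rec, ok := ev.(*s3.RecordsEvent); ok {
+//	            _ = rec.Payload // returned records
+//	        }
+//	    }
+//	}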
+func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} + +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + EventStream *SelectObjectContentEventStream +} + +// String returns the string representation +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. 
+ // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // AWS Key Management Service (KMS) customer master key ID to use for the default + // encryption. This parameter is allowed if and only if SSEAlgorithm is set + // to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // However, if you are using encryption with cross-account operations, you must + // use a fully qualified CMK ARN. For more information, see Using encryption + // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). 
+ // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more + // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. 
If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon + // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon Simple Storage Service Developer Guide. + BucketKeyEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule { + s.BucketKeyEnabled = &v + return s +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). +type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed. + ReplicaModifications *ReplicaModifications `type:"structure"` + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required.
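+//
+// Illustrative usage (editor's sketch, not part of the generated code): a
+// replication rule that should copy only SSE-KMS-encrypted objects might
+// populate this field as follows, where rule is an assumed *ReplicationRule:
+//
+//	rule.SetSourceSelectionCriteria(&SourceSelectionCriteria{
+//		SseKmsEncryptedObjects: &SseKmsEncryptedObjects{
+//			Status: aws.String(SseKmsEncryptedObjectsStatusEnabled),
+//		},
+//	})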
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.ReplicaModifications != nil { + if err := s.ReplicaModifications.Validate(); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) + } + } + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicaModifications sets the ReplicaModifications field's value. +func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { + s.ReplicaModifications = v + return s +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// A container for filter information for the selection of S3 objects encrypted +// with AWS KMS. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using a customer master key (CMK) stored in AWS Key Management Service. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +// Container for the stats details. +type Stats struct { + _ struct{} `type:"structure"` + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The total number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Stats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stats) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Stats) SetBytesProcessed(v int64) *Stats { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. 
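+//
+// Illustrative usage (editor's sketch, not part of the generated code): Stats
+// values normally arrive as a StatsEvent on a SelectObjectContent event
+// stream; out is an assumed *SelectObjectContentOutput:
+//
+//	for ev := range out.EventStream.Events() {
+//		if st, ok := ev.(*StatsEvent); ok && st.Details != nil {
+//			fmt.Printf("scanned=%d returned=%d\n",
+//				aws.Int64Value(st.Details.BytesScanned),
+//				aws.Int64Value(st.Details.BytesReturned))
+//		}
+//	}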
+func (s *Stats) SetBytesReturned(v int64) *Stats { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Stats) SetBytesScanned(v int64) *Stats { + s.BytesScanned = &v + return s +} + +// Container for the Stats Event. +type StatsEvent struct { + _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` + + // The Stats event details. + Details *Stats `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s StatsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatsEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent { + s.Details = v + return s +} + +// The StatsEvent is an event in the SelectObjectContentEventStream group of events. +func (s *StatsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *StatsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into a stream event value. This method +// should only be used internally within the SDK's EventStream handling. +func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. +type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis.
+ // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// A container of a key value name pair. +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the object key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Container for TagSet elements. 
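+//
+// Illustrative usage (editor's sketch, not part of the generated code): a
+// caller typically sends a Tagging value with PutBucketTagging or
+// PutObjectTagging; svc is an assumed *S3 client and the bucket name is
+// hypothetical:
+//
+//	_, err := svc.PutBucketTagging(&PutBucketTaggingInput{
+//		Bucket: aws.String("examplebucket"),
+//		Tagging: &Tagging{
+//			TagSet: []*Tag{{Key: aws.String("team"), Value: aws.String("storage")}},
+//		},
+//	})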
+type Tagging struct { + _ struct{} `type:"structure"` + + // A collection for a set of tags. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Container for granting information. +type TargetGrant struct { + _ struct{} `type:"structure"` + + // Container for the person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number + // of days specified for Archive Access tier must be at least 90 days and Deep + // Archive Access tier must be at least 180 days. The maximum can be up to 2 + // years (730 days).
+ // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. 
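+//
+// Illustrative usage (editor's sketch, not part of the generated code): the
+// setters chain, so a notification entry can be built fluently; the topic ARN
+// and bucket name are hypothetical and svc is an assumed *S3 client:
+//
+//	cfg := (&TopicConfiguration{}).
+//		SetTopicArn("arn:aws:sns:us-west-2:123456789012:s3-events").
+//		SetEvents([]*string{aws.String(EventS3ObjectCreated)})
+//	_, err := svc.PutBucketNotificationConfiguration(&PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		NotificationConfiguration: &NotificationConfiguration{
+//			TopicConfigurations: []*TopicConfiguration{cfg},
+//		},
+//	})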
+func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) +// instead. +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of events related to objects. + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value.
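+//
+// Illustrative usage (editor's sketch, not part of the generated code): a
+// Transition is usually embedded in a lifecycle rule, and either Date or Days
+// is set, not both. For example, moving objects to STANDARD_IA after 30 days:
+//
+//	tr := (&Transition{}).
+//		SetDays(30).
+//		SetStorageClass(TransitionStorageClassStandardIa)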
+func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +type UploadPartCopyInput struct { + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + + // The bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and key of the source object, separated by a slash (/). + // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, + // use awsexamplebucket/reports/january.pdf. The value must be URL encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. + // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using + // access points only when the source and destination buckets are in the + // same AWS Region. Alternatively, for objects accessed through Amazon S3 + // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. + // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL encoded.
+ // + // To copy a specific version of an object, append ?versionId=<version-id> to + // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first 10 bytes of the source. You can copy a range only if the source object + // is greater than 5 MB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account id of the expected destination bucket owner. If the destination + // bucket is owned by a different account, the request will fail with an HTTP + // 403 (Access Denied) error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account id of the expected source bucket owner. If the source bucket + // is owned by a different account, the request will fail with an HTTP 403 (Access + // Denied) error. + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied.
This is a positive integer between 1 and + // 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value.
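+//
+// Illustrative usage (editor's sketch, not part of the generated code):
+// copying part 1 of a multipart upload from an existing object; svc is an
+// assumed *S3 client and uploadID comes from a prior CreateMultipartUpload:
+//
+//	out, err := svc.UploadPartCopy(&UploadPartCopyInput{
+//		Bucket:     aws.String("destbucket"),
+//		Key:        aws.String("large-object"),
+//		UploadId:   aws.String(uploadID),
+//		PartNumber: aws.Int64(1),
+//		CopySource: aws.String("srcbucket/large-object"),
+//	})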
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
+func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. 
This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
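+//
+// Illustrative usage (editor's sketch, not part of the generated code): a
+// minimal part upload; svc is an assumed *S3 client, chunk an assumed []byte,
+// and uploadID comes from a prior CreateMultipartUpload:
+//
+//	part, err := svc.UploadPart(&UploadPartInput{
+//		Bucket:     aws.String("examplebucket"),
+//		Key:        aws.String("large-object"),
+//		UploadId:   aws.String(uploadID),
+//		PartNumber: aws.Int64(1),
+//		Body:       bytes.NewReader(chunk),
+//	})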
+func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value.
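+//
+// Illustrative usage (editor's sketch, not part of the generated code): the
+// ETag from each UploadPartOutput is what CompleteMultipartUpload expects;
+// part is an assumed *UploadPartOutput for part number 1:
+//
+//	completed := &CompletedPart{
+//		ETag:       part.ETag,
+//		PartNumber: aws.Int64(1),
+//	}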
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the error document for the website. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website. + IndexDocument *IndexDocument `type:"structure"` + + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
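+//
+// Illustrative usage (editor's sketch, not part of the generated code):
+// serving a static site with index and error documents; svc is an assumed
+// *S3 client and the bucket name is hypothetical:
+//
+//	_, err := svc.PutBucketWebsite(&PutBucketWebsiteInput{
+//		Bucket: aws.String("examplebucket"),
+//		WebsiteConfiguration: &WebsiteConfiguration{
+//			IndexDocument: &IndexDocument{Suffix: aws.String("index.html")},
+//			ErrorDocument: &ErrorDocument{Key: aws.String("error.html")},
+//		},
+//	})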
+func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum +func AnalyticsS3ExportFileFormat_Values() []string { + return []string{ + AnalyticsS3ExportFileFormatCsv, + } +} + +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum +func BucketAccelerateStatus_Values() []string { + return []string{ + BucketAccelerateStatusEnabled, + BucketAccelerateStatusSuspended, + } +} + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// 
BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintAfSouth1 = "af-south-1" + + // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApEast1 = "ap-east-1" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast2 = "ap-northeast-2" + + // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast3 = "ap-northeast-3" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintCaCentral1 = "ca-central-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintCnNorthwest1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" + + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" + + // BucketLocationConstraintEuNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuNorth1 = "eu-north-1" + + // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth1 = "eu-south-1" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest2 = "eu-west-2" + + // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest3 = "eu-west-3" + + // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintMeSouth1 = "me-south-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsEast2 = "us-east-2" + + // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovEast1 = "us-gov-east-1" + + // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovWest1 = "us-gov-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value 
+ BucketLocationConstraintUsWest2 = "us-west-2" +) + +// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintAfSouth1, + BucketLocationConstraintApEast1, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintApNortheast2, + BucketLocationConstraintApNortheast3, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintCaCentral1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintCnNorthwest1, + BucketLocationConstraintEu, + BucketLocationConstraintEuCentral1, + BucketLocationConstraintEuNorth1, + BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuWest1, + BucketLocationConstraintEuWest2, + BucketLocationConstraintEuWest3, + BucketLocationConstraintMeSouth1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintUsEast2, + BucketLocationConstraintUsGovEast1, + BucketLocationConstraintUsGovWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + } +} + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + BucketLogsPermissionRead, + BucketLogsPermissionWrite, + } +} + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum +func BucketVersioningStatus_Values() []string { + return []string{ + BucketVersioningStatusEnabled, + BucketVersioningStatusSuspended, + } +} + +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeNone, + CompressionTypeGzip, + CompressionTypeBzip2, + } +} + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + +// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum +func DeleteMarkerReplicationStatus_Values() []string { + return []string{ + DeleteMarkerReplicationStatusEnabled, + DeleteMarkerReplicationStatusDisabled, + } +} + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. 
An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeUrl, + } +} + +// The bucket event for which to send notifications. +const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" +) + +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventS3ReducedRedundancyLostObject, + EventS3ObjectCreated, + EventS3ObjectCreatedPut, + EventS3ObjectCreatedPost, + EventS3ObjectCreatedCopy, + EventS3ObjectCreatedCompleteMultipartUpload, + EventS3ObjectRemoved, + EventS3ObjectRemovedDelete, + EventS3ObjectRemovedDeleteMarkerCreated, + EventS3ObjectRestore, + EventS3ObjectRestorePost, + EventS3ObjectRestoreCompleted, + EventS3Replication, + EventS3ReplicationOperationFailedReplication, + EventS3ReplicationOperationNotTracked, + EventS3ReplicationOperationMissedThreshold, + EventS3ReplicationOperationReplicatedAfterThreshold, + } +} + +const ( + // ExistingObjectReplicationStatusEnabled is a 
ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" +) + +// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum +func ExistingObjectReplicationStatus_Values() []string { + return []string{ + ExistingObjectReplicationStatusEnabled, + ExistingObjectReplicationStatusDisabled, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +// ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +// FilterRuleName_Values returns all elements of the FilterRuleName enum +func FilterRuleName_Values() []string { + return []string{ + FilterRuleNamePrefix, + FilterRuleNameSuffix, + } +} + +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const ( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" +) + +// InventoryFormat_Values returns all elements of 
the InventoryFormat enum +func InventoryFormat_Values() []string { + return []string{ + InventoryFormatCsv, + InventoryFormatOrc, + InventoryFormatParquet, + } +} + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +// InventoryFrequency_Values returns all elements of the InventoryFrequency enum +func InventoryFrequency_Values() []string { + return []string{ + InventoryFrequencyDaily, + InventoryFrequencyWeekly, + } +} + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum +func InventoryIncludedObjectVersions_Values() []string { + return []string{ + InventoryIncludedObjectVersionsAll, + InventoryIncludedObjectVersionsCurrent, + } +} + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" +) + +// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum +func InventoryOptionalField_Values() []string { + return []string{ + InventoryOptionalFieldSize, + InventoryOptionalFieldLastModifiedDate, + InventoryOptionalFieldStorageClass, + InventoryOptionalFieldEtag, + InventoryOptionalFieldIsMultipartUploaded, + InventoryOptionalFieldReplicationStatus, + InventoryOptionalFieldEncryptionStatus, + InventoryOptionalFieldObjectLockRetainUntilDate, + InventoryOptionalFieldObjectLockMode, + InventoryOptionalFieldObjectLockLegalHoldStatus, + InventoryOptionalFieldIntelligentTieringAccessTier, + } +} + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a 
JSONType enum value + JSONTypeLines = "LINES" +) + +// JSONType_Values returns all elements of the JSONType enum +func JSONType_Values() []string { + return []string{ + JSONTypeDocument, + JSONTypeLines, + } +} + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + +// MetricsStatus_Values returns all elements of the MetricsStatus enum +func MetricsStatus_Values() []string { + return []string{ + MetricsStatusEnabled, + MetricsStatusDisabled, + } +} + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum +func ObjectLockEnabled_Values() []string { + return []string{ + ObjectLockEnabledEnabled, + } +} + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + 
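+// The generated *_Values helpers in this file are convenient for client-side
+// input validation. An illustrative sketch (not part of the generated code):
+//
+//	func isValidCannedACL(v string) bool {
+//		for _, valid := range ObjectCannedACL_Values() {
+//			if v == valid {
+//				return true
+//			}
+//		}
+//		return false
+//	}
+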
+// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum +func ObjectLockLegalHoldStatus_Values() []string { + return []string{ + ObjectLockLegalHoldStatusOn, + ObjectLockLegalHoldStatusOff, + } +} + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +// ObjectLockMode_Values returns all elements of the ObjectLockMode enum +func ObjectLockMode_Values() []string { + return []string{ + ObjectLockModeGovernance, + ObjectLockModeCompliance, + } +} + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + +// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum +func ObjectLockRetentionMode_Values() []string { + return []string{ + ObjectLockRetentionModeGovernance, + ObjectLockRetentionModeCompliance, + } +} + +// The container element for object ownership for a bucket's ownership controls. +// +// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to +// the bucket owner if the objects are uploaded with the bucket-owner-full-control +// canned ACL. +// +// ObjectWriter - The uploading account will own the object if the object is +// uploaded with the bucket-owner-full-control canned ACL. +const ( + // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" + + // ObjectOwnershipObjectWriter is a ObjectOwnership enum value + ObjectOwnershipObjectWriter = "ObjectWriter" +) + +// ObjectOwnership_Values returns all elements of the ObjectOwnership enum +func ObjectOwnership_Values() []string { + return []string{ + ObjectOwnershipBucketOwnerPreferred, + ObjectOwnershipObjectWriter, + } +} + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" + + // ObjectStorageClassOutposts is a ObjectStorageClass enum value + ObjectStorageClassOutposts = "OUTPOSTS" +) + +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + ObjectStorageClassOutposts, + } +} + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + 
ObjectVersionStorageClassStandard = "STANDARD" +) + +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +// OwnerOverride_Values returns all elements of the OwnerOverride enum +func OwnerOverride_Values() []string { + return []string{ + OwnerOverrideDestination, + } +} + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +// Payer_Values returns all elements of the Payer enum +func Payer_Values() []string { + return []string{ + PayerRequester, + PayerBucketOwner, + } +} + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + +const ( + // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusEnabled = "Enabled" + + // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusDisabled = "Disabled" +) + +// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum +func ReplicaModificationsStatus_Values() []string { + return []string{ + ReplicaModificationsStatusEnabled, + ReplicaModificationsStatusDisabled, + } +} + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum +func ReplicationRuleStatus_Values() []string { + return []string{ + ReplicationRuleStatusEnabled, + ReplicationRuleStatusDisabled, + } +} + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + 
// ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + } +} + +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + +// ReplicationTimeStatus_Values returns all elements of the ReplicationTimeStatus enum +func ReplicationTimeStatus_Values() []string { + return []string{ + ReplicationTimeStatusEnabled, + ReplicationTimeStatusDisabled, + } +} + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +// RestoreRequestType_Values returns all elements of the RestoreRequestType enum +func RestoreRequestType_Values() []string { + return []string{ + RestoreRequestTypeSelect, + } +} + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + } +} + +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum +func SseKmsEncryptedObjectsStatus_Values() []string { + return []string{ + SseKmsEncryptedObjectsStatusEnabled, + SseKmsEncryptedObjectsStatusDisabled, + } +} + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // 
StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" + + // StorageClassOutposts is a StorageClass enum value + StorageClassOutposts = "OUTPOSTS" +) + +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + StorageClassIntelligentTiering, + StorageClassGlacier, + StorageClassDeepArchive, + StorageClassOutposts, + } +} + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum +func StorageClassAnalysisSchemaVersion_Values() []string { + return []string{ + StorageClassAnalysisSchemaVersionV1, + } +} + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierStandard, + TierBulk, + TierExpedited, + } +} + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + } +} + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
new file mode 100644
index 00000000000..407f06b6ede
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,202 @@
+package s3
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+	contentMD5Header    = "Content-Md5"
+	contentSha256Header = "X-Amz-Content-Sha256"
+	amzTeHeader         = "X-Amz-Te"
+	amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+	appendMD5TxEncoding = "append-md5"
+)
+
+// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
+// request. If the body is not seekable, or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+		return
+	}
+
+	var md5Hash, sha256Hash hash.Hash
+	hashers := make([]io.Writer, 0, 2)
+
+	// Determine upfront which hashes can be set without overriding
+	// user-provided header data.
+	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+		md5Hash = md5.New()
+		hashers = append(hashers, md5Hash)
+	}
+
+	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+		sha256Hash = sha256.New()
+		hashers = append(hashers, sha256Hash)
+	}
+
+	// Create the destination writer based on the hashes that are not already
+	// provided by the user.
+	var dst io.Writer
+	switch len(hashers) {
+	case 0:
+		return
+	case 1:
+		dst = hashers[0]
+	default:
+		dst = io.MultiWriter(hashers...)
+	}
+
+	if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
+		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+		return
+	}
+
+	// For the hashes created, set the associated headers that the user did not
+	// already provide.
+	if md5Hash != nil {
+		sum := make([]byte, md5.Size)
+		encoded := make([]byte, md5Base64EncLen)
+
+		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+	}
+
+	if sha256Hash != nil {
+		encoded := make([]byte, sha256HexEncLen)
+		sum := make([]byte, sha256.Size)
+
+		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+	}
+}
+
+const (
+	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
+)
+
+// Adds the x-amz-te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if disabled, the request is presigned, or the
+// API operation does not support content MD5 validation.
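+//
+// Conceptually, a response honoring this request carries the checksum after
+// the payload (an illustrative sketch of the exchange, not an exact
+// transcript):
+//
+//	X-Amz-Transfer-Encoding: append-md5
+//	Content-Length: <payload length + 16>
+//
+//	<payload bytes><16-byte MD5 of payload>
+//
+// useMD5ValidationReader below strips the trailing 16 bytes (md5.Size) from
+// the reported ContentLength and verifies them against an MD5 computed over
+// the payload as it is read.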
+func askForTxEncodingAppendMD5(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+	if r.Error != nil {
+		return
+	}
+
+	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+		return
+	}
+
+	var bodyReader *io.ReadCloser
+	var contentLen int64
+	switch tv := r.Data.(type) {
+	case *GetObjectOutput:
+		bodyReader = &tv.Body
+		contentLen = aws.Int64Value(tv.ContentLength)
+		// Update ContentLength, hiding the trailing MD5 checksum.
+		tv.ContentLength = aws.Int64(contentLen - md5.Size)
+		tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
+	default:
+		r.Error = awserr.New("ChecksumValidationError",
+			fmt.Sprintf("%s: %s header received on unsupported API, %s",
+				amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
+			), nil)
+		return
+	}
+
+	if contentLen < md5.Size {
+		r.Error = awserr.New("ChecksumValidationError",
+			fmt.Sprintf("invalid Content-Length %d for %s %s",
+				contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
+			), nil)
+		return
+	}
+
+	// Wrap and swap the response body reader with the validation reader.
+	*bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
+}
+
+type md5ValidationReader struct {
+	rawReader io.ReadCloser
+	payload   io.Reader
+	hash      hash.Hash
+
+	payloadLen int64
+	read       int64
+}
+
+func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
+	h := md5.New()
+	return &md5ValidationReader{
+		rawReader:  reader,
+		payload:    io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
+		hash:       h,
+		payloadLen: payloadLen,
+	}
+}
+
+func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
+	n, err = v.payload.Read(p)
+	if err != nil && err != io.EOF {
+		return n, err
+	}
+
+	v.read += int64(n)
+
+	if err == io.EOF {
+		if v.read != v.payloadLen {
+			return n, io.ErrUnexpectedEOF
+		}
+		expectSum := make([]byte, md5.Size)
+		actualSum := make([]byte, md5.Size)
+		if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
+			return n, sumReadErr
+		}
+		actualSum = v.hash.Sum(actualSum[0:0])
+		if !bytes.Equal(expectSum, actualSum) {
+			return n, awserr.New("InvalidChecksum",
+				fmt.Sprintf("expected MD5 checksum %s, got %s",
+					hex.EncodeToString(expectSum),
+					hex.EncodeToString(actualSum),
+				),
+				nil)
+		}
+	}
+
+	return n, err
+}
+
+func (v *md5ValidationReader) Close() error {
+	return v.rawReader.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 00000000000..9ba8a788720
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,107 @@
+package s3
+
+import (
+	"io/ioutil"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with the GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
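+//
+// For example (illustrative):
+//
+//	NormalizeBucketLocation("")          // returns "us-east-1"
+//	NormalizeBucketLocation("EU")        // returns "eu-west-1"
+//	NormalizeBucketLocation("us-west-2") // returned unchanged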
+// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 00000000000..f1959b03a95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,77 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(endpointHandler) + + // Require SSL when using SSE keys 
+	c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+	c.Handlers.Build.PushBack(computeSSEKeyMD5)
+	c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
+
+	// S3 uses custom error unmarshaling logic
+	c.Handlers.UnmarshalError.Clear()
+	c.Handlers.UnmarshalError.PushBack(unmarshalError)
+	c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+	case opCreateBucket:
+		// Auto-populate LocationConstraint with current region
+		r.Handlers.Validate.PushFront(populateLocationConstraint)
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+		r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+	case opPutObject, opUploadPart:
+		r.Handlers.Build.PushBack(computeBodyHashes)
+		// Disabled until #1837 root issue is resolved.
+		// case opGetObject:
+		// 	r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+		// 	r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+	}
+}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+	getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+	getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+	getCopySourceSSECustomerKey() string
+}
+
+// endpointARNGetter is an accessor interface to grab the field corresponding
+// to an endpoint ARN input.
+type endpointARNGetter interface {
+	getEndpointARN() (arn.Resource, error)
+	hasEndpointARN() bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 00000000000..0def02255ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 00000000000..7f7aca20859
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,110 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader also supports both
+// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+//	// The session the S3 Uploader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create an uploader with the session and default options
+//	uploader := s3manager.NewUploader(sess)
+//
+//	f, err := os.Open(filename)
+//	if err != nil {
+//		return fmt.Errorf("failed to open file %q, %v", filename, err)
+//	}
+//
+//	// Upload the file to S3.
+//	result, err := uploader.Upload(&s3manager.UploadInput{
+//		Bucket: aws.String(myBucket),
+//		Key:    aws.String(myString),
+//		Body:   f,
+//	})
+//	if err != nil {
+//		return fmt.Errorf("failed to upload file, %v", err)
+//	}
+//	fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+//	// The session the S3 Downloader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create a downloader with the session and default options
+//	downloader := s3manager.NewDownloader(sess)
+//
+//	// Create a file to write the S3 Object contents to.
+//	f, err := os.Create(filename)
+//	if err != nil {
+//		return fmt.Errorf("failed to create file %q, %v", filename, err)
+//	}
+//
+//	// Write the contents of S3 Object to the file
+//	n, err := downloader.Download(f, &s3.GetObjectInput{
+//		Bucket: aws.String(myBucket),
+//		Key:    aws.String(myString),
+//	})
+//	if err != nil {
+//		return fmt.Errorf("failed to download file, %v", err)
+//	}
+//	fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Automatic URI cleaning
+//
+// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
+// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
+// used by the service client.
+//
+//	svc := s3.New(sess, &aws.Config{
+//		DisableRestProtocolURICleaning: aws.Bool(true),
+//	})
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucketname"),
+//		Key:    aws.String("//foo//bar//moo"),
+//	})
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+//	sess := session.Must(session.NewSession())
+//
+//	bucket := "my-bucket"
+//	region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//			fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//		}
+//		return err
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more
+// information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
new file mode 100644
index 00000000000..6346b927960
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
@@ -0,0 +1,194 @@
+package s3
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	awsarn "github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/s3shared"
+	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
+)
+
+// Used by shapes with members decorated as endpoint ARN.
+func parseEndpointARN(v string) (arn.Resource, error) {
+	return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+	resParts := arn.SplitResource(a.Resource)
+	switch resParts[0] {
+	case "accesspoint":
+		if a.Service != "s3" {
+			return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"}
+		}
+		return arn.ParseAccessPointResource(a, resParts[1:])
+	case "outpost":
+		if a.Service != "s3-outposts" {
+			return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+		}
+		return parseOutpostAccessPointResource(a, resParts[1:])
+	default:
+		return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+	}
+}
+
+// parseOutpostAccessPointResource attempts to parse the ARN's resource as an
+// outpost access-point resource.
+// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + resReq := s3shared.ResourceRequest{ + Resource: resource, + Request: req, + } + + if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { + req.Error = s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostAccessPointARN: + // outposts does not support FIPS regions + if resReq.ResourceConfiguredForFIPS() { + req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil) + return + } + + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = s3shared.NewInvalidARNError(resource, nil) + } +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. 
+ return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Dualstack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 00000000000..eb77d981ef6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,187 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// hasCustomEndpoint returns true if endpoint is a custom endpoint +func hasCustomEndpoint(r *request.Request) bool { + return len(aws.StringValue(r.Config.Endpoint)) > 0 +} + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. 
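+//
+// As an editorial illustration (not upstream SDK text; the bucket ARN and key
+// below are assumptions), this builder is exercised when an access point ARN
+// is supplied where a bucket name is normally expected:
+//
+//    svc := s3.New(sess)
+//    out, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket: aws.String("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint"),
+//        Key:    aws.String("mykey"),
+//    })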
+//
+func (a accessPointEndpointBuilder) build(req *request.Request) error {
+	resolveService := arn.AccessPointARN(a).Service
+	resolveRegion := arn.AccessPointARN(a).Region
+	cfgRegion := aws.StringValue(req.Config.Region)
+
+	if s3shared.IsFIPS(cfgRegion) {
+		if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) {
+			// FIPS with cross region is not supported; the SDK must fail
+			// because there is no well-defined method for the SDK to construct
+			// a correct FIPS endpoint.
+			return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
+				req.ClientInfo.PartitionID, cfgRegion, nil)
+		}
+		resolveRegion = cfgRegion
+	}
+
+	endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService)
+	if err != nil {
+		return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a),
+			req.ClientInfo.PartitionID, cfgRegion, err)
+	}
+
+	endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
+
+	if !hasCustomEndpoint(req) {
+		if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+			return err
+		}
+		const serviceEndpointLabel = "s3-accesspoint"
+
+		// dual stack provided by endpoint resolver
+		cfgHost := req.HTTPRequest.URL.Host
+		if strings.HasPrefix(cfgHost, "s3") {
+			req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
+		}
+	}
+
+	protocol.HostPrefixBuilder{
+		Prefix:   accessPointPrefixTemplate,
+		LabelsFn: a.hostPrefixLabelValues,
+	}.Build(req)
+
+	// signer redirection
+	redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
+
+	err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+	if err != nil {
+		return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err)
+	}
+
+	return nil
+}
+
+func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+	return map[string]string{
+		accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
+		accountIDPrefixLabel:   arn.AccessPointARN(a).AccountID,
+	}
+}
+
+// outpostAccessPointEndpointBuilder represents the endpoint builder for outpost access point arn.
+type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
+
+// build builds an endpoint corresponding to the outpost access point arn.
+//
+// For building an endpoint from outpost access point arn, format used is:
+// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix}
+// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
+//
+// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as signing name.
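+//
+// An editorial sketch (not upstream SDK text; the ARN and key are assumptions):
+// supplying an Outpost access point ARN as the bucket routes the request
+// through this builder:
+//
+//    out, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket: aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint"),
+//        Key:    aws.String("mykey"),
+//    })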
+// +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + + endpointsID := resolveService + if resolveService == "s3-outposts" { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + // add url host as s3-outposts + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, endpointsID) { + req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + } + } + + protocol.HostPrefixBuilder{ + Prefix: outpostAccessPointPrefixTemplate, + LabelsFn: o.hostPrefixLabelValues, + }.Build(req) + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go new file mode 100644 index 00000000000..f64b55135ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,60 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia Region. 
+	// For legacy compatibility, if you re-create an existing bucket that you already
+	// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+	// bucket access control lists (ACLs).
+	ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+	// ErrCodeInvalidObjectState for service response error code
+	// "InvalidObjectState".
+	//
+	// Object is archived and inaccessible until restored.
+	ErrCodeInvalidObjectState = "InvalidObjectState"
+
+	// ErrCodeNoSuchBucket for service response error code
+	// "NoSuchBucket".
+	//
+	// The specified bucket does not exist.
+	ErrCodeNoSuchBucket = "NoSuchBucket"
+
+	// ErrCodeNoSuchKey for service response error code
+	// "NoSuchKey".
+	//
+	// The specified key does not exist.
+	ErrCodeNoSuchKey = "NoSuchKey"
+
+	// ErrCodeNoSuchUpload for service response error code
+	// "NoSuchUpload".
+	//
+	// The specified multipart upload does not exist.
+	ErrCodeNoSuchUpload = "NoSuchUpload"
+
+	// ErrCodeObjectAlreadyInActiveTierError for service response error code
+	// "ObjectAlreadyInActiveTierError".
+	//
+	// This operation is not allowed against this storage tier.
+	ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+	// ErrCodeObjectNotInActiveTierError for service response error code
+	// "ObjectNotInActiveTierError".
+	//
+	// The source object of the COPY operation is not in the active tier and is
+	// only stored in Amazon S3 Glacier.
+	ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000000..81cdec1ae75
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,136 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// An operationBlacklist is a list of operation names that a request
+// handler should not be executed with.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] == r.Operation.Name {
+			return false
+		}
+	}
+	return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+	opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Automatically add the bucket name to the endpoint domain
+// if possible. This style of bucket is valid for all bucket names which are
+// DNS compatible and do not contain "."
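+//
+// An editorial sketch (not upstream SDK text): with a DNS-compatible bucket
+// such as "mybucket", a path-style URL like
+// https://s3.us-west-2.amazonaws.com/mybucket/key is rewritten to the
+// virtual-hosted form https://mybucket.s3.us-west-2.amazonaws.com/key.
+// Path style can be kept explicitly via the client config:
+//
+//    svc := s3.New(sess, &aws.Config{
+//        S3ForcePathStyle: aws.Bool(true),
+//    })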
+func updateEndpointForS3Config(r *request.Request, bucketName string) {
+	forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+	accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+	if accelerate && accelerateOpBlacklist.Continue(r) {
+		if forceHostStyle {
+			if r.Config.Logger != nil {
+				r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+			}
+		}
+		updateEndpointForAccelerate(r, bucketName)
+	} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+		updateEndpointForHostStyle(r, bucketName)
+	}
+}
+
+func updateEndpointForHostStyle(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+		// bucket name must be valid to put into the host
+		return
+	}
+
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+var (
+	accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
+			nil)
+		return
+	}
+
+	parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+	if len(parts) < 3 {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+				r.HTTPRequest.URL.Host), nil)
+		return
+	}
+
+	if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+		parts[0] = "s3-accelerate"
+	}
+	for i := 1; i+1 < len(parts); i++ {
+		if parts[i] == aws.StringValue(r.Config.Region) {
+			parts = append(parts[:i], parts[i+1:]...)
+			break
+		}
+	}
+
+	r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+	if iface, ok := params.(bucketGetter); ok {
+		b := iface.getBucket()
+		return b, len(b) > 0
+	}
+
+	return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+	// Bucket might be DNS compatible but dots in the hostname will fail
+	// certificate validation, so do not use host-style.
+	if u.Scheme == "https" && strings.Contains(bucket, ".") {
+		return false
+	}
+
+	// if the bucket is DNS compatible
+	return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+	return reDomain.MatchString(bucket) &&
+		!reIPAddress.MatchString(bucket) &&
+		!strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+	u.Host = bucket + "." + u.Host
+	removeBucketFromPath(u)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 00000000000..8e6f3307d41
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 00000000000..14d05f7b75a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+	if r.Operation.HTTPMethod == "PUT" {
+		// 100-Continue should only be used on put requests.
+		r.Handlers.Sign.PushBack(add100Continue)
+	}
+}
+
+func add100Continue(r *request.Request) {
+	if aws.BoolValue(r.Config.S3Disable100Continue) {
+		return
+	}
+	if r.HTTPRequest.ContentLength < 1024*1024*2 {
+		// Ignore requests smaller than 2MB. This helps prevent delaying
+		// requests unnecessarily.
+		return
+	}
+
+	r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
new file mode 100644
index 00000000000..7c622187843
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -0,0 +1,471 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

+// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package s3iface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3API provides an interface to enable mocking the
+// s3.S3 service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so that the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // Amazon Simple Storage Service.
+//    func myFunc(svc s3iface.S3API) bool {
+//        // Make svc.AbortMultipartUpload request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := s3.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockS3Client struct {
+//        s3iface.S3API
+//    }
+//    func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockS3Client{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type S3API interface {
+	AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+	AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
+	AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
+
+	CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+	CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
+	CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
+
+	CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+	CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
+	CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
+
+	CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
+	CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
+	CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
+
+	CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+	CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
+	CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
+
+	DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
+	DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
+	DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
+
+	DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
+	DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
+	DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)
+
+	DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
+	DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
+	DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
+
+	DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error)
+	DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option)
(*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + + DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) + + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) + DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) + DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) + DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) + DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) + + DeleteBucketOwnershipControls(*s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error) + DeleteBucketOwnershipControlsWithContext(aws.Context, *s3.DeleteBucketOwnershipControlsInput, ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error) + DeleteBucketOwnershipControlsRequest(*s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error) + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error) + DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error) + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + 
DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error) + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) + DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error) + DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error) + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) + + GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) + GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) + GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error) + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) + GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) + GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error) + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + + GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationWithContext(aws.Context, 
*s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) + + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) + GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) + GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error) + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error) + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error) + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) + GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) + GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error) + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error) + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketOwnershipControls(*s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error) + 
GetBucketOwnershipControlsWithContext(aws.Context, *s3.GetBucketOwnershipControlsInput, ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error) + GetBucketOwnershipControlsRequest(*s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error) + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error) + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error) + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error) + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error) + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error) + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error) + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) + + GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) 
(*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) + + GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) + + GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) + GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error) + GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error) + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error) + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error) + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + + ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) + + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) + ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) + ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, 
*s3.ListBucketInventoryConfigurationsOutput) + + ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) + ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) + ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error) + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error) + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error) + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error + ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error + + ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) + ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error) + ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) + + ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error + ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error) + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error + + PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) + 
PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) + PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error) + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) + PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) + PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error) + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + + PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) + + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) + PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) + PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error) + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, 
...request.Option) (*s3.PutBucketLoggingOutput, error) + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) + PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) + PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error) + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketOwnershipControls(*s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error) + PutBucketOwnershipControlsWithContext(aws.Context, *s3.PutBucketOwnershipControlsInput, ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error) + PutBucketOwnershipControlsRequest(*s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error) + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error) + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error) + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error) + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + 
PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error) + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error) + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error) + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) + + PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) + + PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) + + PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) + PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error) + PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) + + PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error) + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) + SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error) + SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) + UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error) + 
UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + WaitUntilBucketExists(*s3.HeadBucketInput) error + WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error + + WaitUntilBucketNotExists(*s3.HeadBucketInput) error + WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error + + WaitUntilObjectExists(*s3.HeadObjectInput) error + WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error + + WaitUntilObjectNotExists(*s3.HeadObjectInput) error + WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error +} + +var _ S3API = (*s3.S3)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go new file mode 100644 index 00000000000..22bd0b7ce59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go @@ -0,0 +1,529 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +const ( + // DefaultBatchSize is the batch size we initialize when constructing a batch delete client. + // This value is used when calling DeleteObjects. This represents how many objects to delete + // per DeleteObjects call. + DefaultBatchSize = 100 +) + +// BatchError will contain the key and bucket of the object that failed to +// either upload or download. +type BatchError struct { + Errors Errors + code string + message string +} + +// Errors is a typed alias for a slice of errors to satisfy the error +// interface. +type Errors []Error + +func (errs Errors) Error() string { + buf := bytes.NewBuffer(nil) + for i, err := range errs { + buf.WriteString(err.Error()) + if i+1 < len(errs) { + buf.WriteString("\n") + } + } + return buf.String() +} + +// Error will contain the original error, bucket, and key of the operation that failed +// during batch operations. +type Error struct { + OrigErr error + Bucket *string + Key *string +} + +func newError(err error, bucket, key *string) Error { + return Error{ + err, + bucket, + key, + } +} + +func (err *Error) Error() string { + origErr := "" + if err.OrigErr != nil { + origErr = ":\n" + err.OrigErr.Error() + } + return fmt.Sprintf("failed to perform batch operation on %q to %q%s", + aws.StringValue(err.Key), + aws.StringValue(err.Bucket), + origErr, + ) +} + +// NewBatchError will return a BatchError that satisfies the awserr.Error interface. +func NewBatchError(code, message string, err []Error) awserr.Error { + return &BatchError{ + Errors: err, + code: code, + message: message, + } +} + +// Code will return the code associated with the batch error. +func (err *BatchError) Code() string { + return err.code +} + +// Message will return the message associated with the batch error. +func (err *BatchError) Message() string { + return err.message +} + +func (err *BatchError) Error() string { + return awserr.SprintError(err.Code(), err.Message(), "", err.Errors) +} + +// OrigErr will return the original error. Which, in this case, will always be nil +// for batched operations. 
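+// (Note: strictly speaking, the value returned below is the Errors slice,
+// which itself satisfies the error interface; callers should not rely on it
+// being a plain nil.)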
+func (err *BatchError) OrigErr() error {
+	return err.Errors
+}
+
+// BatchDeleteIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be deleted.
+type BatchDeleteIterator interface {
+	Next() bool
+	Err() error
+	DeleteObject() BatchDeleteObject
+}
+
+// DeleteListIterator is an alternative iterator for the BatchDelete client. This will
+// iterate through a list of objects and delete the objects.
+//
+// Example:
+//	iter := &s3manager.DeleteListIterator{
+//		Client: svc,
+//		Input: &s3.ListObjectsInput{
+//			Bucket:  aws.String("bucket"),
+//			MaxKeys: aws.Int64(5),
+//		},
+//		Paginator: request.Pagination{
+//			NewRequest: func() (*request.Request, error) {
+//				var inCpy *s3.ListObjectsInput
+//				if input != nil {
+//					tmp := *input
+//					inCpy = &tmp
+//				}
+//				req, _ := c.ListObjectsRequest(inCpy)
+//				return req, nil
+//			},
+//		},
+//	}
+//
+//	batcher := s3manager.NewBatchDeleteWithClient(svc)
+//	if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
+//		return err
+//	}
+type DeleteListIterator struct {
+	Bucket    *string
+	Paginator request.Pagination
+	objects   []*s3.Object
+}
+
+// NewDeleteListIterator will return a new DeleteListIterator.
+func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
+	iter := &DeleteListIterator{
+		Bucket: input.Bucket,
+		Paginator: request.Pagination{
+			NewRequest: func() (*request.Request, error) {
+				var inCpy *s3.ListObjectsInput
+				if input != nil {
+					tmp := *input
+					inCpy = &tmp
+				}
+				req, _ := svc.ListObjectsRequest(inCpy)
+				return req, nil
+			},
+		},
+	}
+
+	for _, opt := range opts {
+		opt(iter)
+	}
+	return iter
+}
+
+// Next will use the S3API client to iterate through a list of objects.
+func (iter *DeleteListIterator) Next() bool {
+	if len(iter.objects) > 0 {
+		iter.objects = iter.objects[1:]
+	}
+
+	if len(iter.objects) == 0 && iter.Paginator.Next() {
+		iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
+	}
+
+	return len(iter.objects) > 0
+}
+
+// Err will return the last known error from Next.
+func (iter *DeleteListIterator) Err() error {
+	return iter.Paginator.Err()
+}
+
+// DeleteObject will return the current object to be deleted.
+func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
+	return BatchDeleteObject{
+		Object: &s3.DeleteObjectInput{
+			Bucket: iter.Bucket,
+			Key:    iter.objects[0].Key,
+		},
+	}
+}
+
+// BatchDelete will use the s3 package's service client to perform a batch
+// delete.
+type BatchDelete struct {
+	Client    s3iface.S3API
+	BatchSize int
+}
+
+// NewBatchDeleteWithClient will return a new delete client that can delete a batched amount of
+// objects.
+//
+// Example:
+//	batcher := s3manager.NewBatchDeleteWithClient(client)
+//
+//	objects := []BatchDeleteObject{
+//		{
+//			Object: &s3.DeleteObjectInput{
+//				Key:    aws.String("key"),
+//				Bucket: aws.String("bucket"),
+//			},
+//		},
+//	}
+//
+//	if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+//		Objects: objects,
+//	}); err != nil {
+//		return err
+//	}
+func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
+	svc := &BatchDelete{
+		Client:    client,
+		BatchSize: DefaultBatchSize,
+	}
+
+	for _, opt := range options {
+		opt(svc)
+	}
+
+	return svc
+}
+
+// NewBatchDelete will return a new delete client that can delete a batched amount of
+// objects.
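+//
+// The batch size can be tuned with a functional option; a sketch (the value
+// 500 is illustrative, S3's DeleteObjects accepts at most 1000 keys per call):
+//
+//	batcher := s3manager.NewBatchDelete(sess, func(d *s3manager.BatchDelete) {
+//		d.BatchSize = 500
+//	})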
+//
+// Example:
+//	batcher := s3manager.NewBatchDelete(sess)
+//
+//	objects := []BatchDeleteObject{
+//		{
+//			Object: &s3.DeleteObjectInput{
+//				Key:    aws.String("key"),
+//				Bucket: aws.String("bucket"),
+//			},
+//		},
+//	}
+//
+//	if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+//		Objects: objects,
+//	}); err != nil {
+//		return err
+//	}
+func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
+	client := s3.New(c)
+	return NewBatchDeleteWithClient(client, options...)
+}
+
+// BatchDeleteObject is a wrapper object for calling the batch delete operation.
+type BatchDeleteObject struct {
+	Object *s3.DeleteObjectInput
+	// After will run after each iteration during the batch process. This function will
+	// be executed whether or not the request was successful.
+	After func() error
+}
+
+// DeleteObjectsIterator is an iterator that uses the scanner pattern to iterate
+// through a series of objects to be deleted.
+type DeleteObjectsIterator struct {
+	Objects []BatchDeleteObject
+	index   int
+	inc     bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (iter *DeleteObjectsIterator) Next() bool {
+	if iter.inc {
+		iter.index++
+	} else {
+		iter.inc = true
+	}
+	return iter.index < len(iter.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the BatchDeleteIterator interface
+// this will only return nil.
+func (iter *DeleteObjectsIterator) Err() error {
+	return nil
+}
+
+// DeleteObject will return the BatchDeleteObject at the current batched index.
+func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
+	object := iter.Objects[iter.index]
+	return object
+}
+
+// Delete will use the iterator to queue up objects that need to be deleted.
+// Once the batch size is met, this will call the deleteBatch function.
+func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
+	var errs []Error
+	objects := []BatchDeleteObject{}
+	var input *s3.DeleteObjectsInput
+
+	for iter.Next() {
+		o := iter.DeleteObject()
+
+		if input == nil {
+			input = initDeleteObjectsInput(o.Object)
+		}
+
+		parity := hasParity(input, o)
+		if parity {
+			input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+				Key:       o.Object.Key,
+				VersionId: o.Object.VersionId,
+			})
+			objects = append(objects, o)
+		}
+
+		if len(input.Delete.Objects) == d.BatchSize || !parity {
+			if err := deleteBatch(ctx, d, input, objects); err != nil {
+				errs = append(errs, err...)
+			}
+
+			objects = objects[:0]
+			input = nil
+
+			if !parity {
+				objects = append(objects, o)
+				input = initDeleteObjectsInput(o.Object)
+				input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+					Key:       o.Object.Key,
+					VersionId: o.Object.VersionId,
+				})
+			}
+		}
+	}
+
+	// iter.Next() could return false (above) plus populate iter.Err()
+	if iter.Err() != nil {
+		errs = append(errs, newError(iter.Err(), nil, nil))
+	}
+
+	if input != nil && len(input.Delete.Objects) > 0 {
+		if err := deleteBatch(ctx, d, input, objects); err != nil {
+			errs = append(errs, err...)
+		}
+	}
+
+	if len(errs) > 0 {
+		return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
+	}
+	return nil
+}
+
+func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
+	return &s3.DeleteObjectsInput{
+		Bucket:       o.Bucket,
+		MFA:          o.MFA,
+		RequestPayer: o.RequestPayer,
+		Delete:       &s3.Delete{},
+	}
+}
+
+const (
+	// ErrDeleteBatchFailCode represents an error code which will be returned
+	// only when DeleteObjects.Errors has an error that does not contain a code.
+	ErrDeleteBatchFailCode       = "DeleteBatchError"
+	errDefaultDeleteBatchMessage = "failed to delete"
+)
+
+// deleteBatch will delete a batch of items in the objects parameters.
+func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
+	errs := []Error{}
+
+	if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
+		for i := 0; i < len(input.Delete.Objects); i++ {
+			errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
+		}
+	} else if len(result.Errors) > 0 {
+		for i := 0; i < len(result.Errors); i++ {
+			code := ErrDeleteBatchFailCode
+			msg := errDefaultDeleteBatchMessage
+			if result.Errors[i].Message != nil {
+				msg = *result.Errors[i].Message
+			}
+			if result.Errors[i].Code != nil {
+				code = *result.Errors[i].Code
+			}
+
+			errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
+		}
+	}
+	for _, object := range objects {
+		if object.After == nil {
+			continue
+		}
+		if err := object.After(); err != nil {
+			errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+		}
+	}
+
+	return errs
+}
+
+func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
+	if o1.Bucket != nil && o2.Object.Bucket != nil {
+		if *o1.Bucket != *o2.Object.Bucket {
+			return false
+		}
+	} else if o1.Bucket != o2.Object.Bucket {
+		return false
+	}
+
+	if o1.MFA != nil && o2.Object.MFA != nil {
+		if *o1.MFA != *o2.Object.MFA {
+			return false
+		}
+	} else if o1.MFA != o2.Object.MFA {
+		return false
+	}
+
+	if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
+		if *o1.RequestPayer != *o2.Object.RequestPayer {
+			return false
+		}
+	} else if o1.RequestPayer != o2.Object.RequestPayer {
+		return false
+	}
+
+	return true
+}
+
+// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
+// through a series of objects to be downloaded.
+type BatchDownloadIterator interface {
+	Next() bool
+	Err() error
+	DownloadObject() BatchDownloadObject
+}
+
+// BatchDownloadObject contains all necessary information to run a batch operation once.
+type BatchDownloadObject struct {
+	Object *s3.GetObjectInput
+	Writer io.WriterAt
+	// After will run after each iteration during the batch process. This function will
+	// be executed whether or not the request was successful.
+	After func() error
+}
+
+// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
+// download of objects.
+type DownloadObjectsIterator struct {
+	Objects []BatchDownloadObject
+	index   int
+	inc     bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *DownloadObjectsIterator) Next() bool {
+	if batcher.inc {
+		batcher.index++
+	} else {
+		batcher.inc = true
+	}
+	return batcher.index < len(batcher.Objects)
+}
+
+// DownloadObject will return the BatchDownloadObject at the current batched index.
+func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
+	object := batcher.Objects[batcher.index]
+	return object
+}
+
+// Err will return an error. Since this is just used to satisfy the BatchDownloadIterator interface
+// this will only return nil.
+func (batcher *DownloadObjectsIterator) Err() error {
+	return nil
+}
+
+// BatchUploadIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be uploaded.
+type BatchUploadIterator interface {
+	Next() bool
+	Err() error
+	UploadObject() BatchUploadObject
+}
+
+// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
+// upload of objects.
+type UploadObjectsIterator struct {
+	Objects []BatchUploadObject
+	index   int
+	inc     bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *UploadObjectsIterator) Next() bool {
+	if batcher.inc {
+		batcher.index++
+	} else {
+		batcher.inc = true
+	}
+	return batcher.index < len(batcher.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the BatchUploadIterator interface
+// this will only return nil.
+func (batcher *UploadObjectsIterator) Err() error {
+	return nil
+}
+
+// UploadObject will return the BatchUploadObject at the current batched index.
+func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
+	object := batcher.Objects[batcher.index]
+	return object
+}
+
+// BatchUploadObject contains all necessary information to run a batch operation once.
+type BatchUploadObject struct {
+	Object *UploadInput
+	// After will run after each iteration during the batch process. This function will
+	// be executed whether or not the request was successful.
+	After func() error
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
new file mode 100644
index 00000000000..9cc1e5970c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
@@ -0,0 +1,159 @@
+package s3manager
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// regionHint to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A "NotFound" error code will be returned if the bucket does not exist in the
+// AWS partition the regionHint belongs to. If the regionHint parameter is an
+// empty string GetBucketRegion will fall back to the ConfigProvider's region
+// config. If the regionHint is empty, and the ConfigProvider does not have a
+// region value, an error will be returned.
+//
+// For example to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+//
+//	sess := session.Must(session.NewSession())
+//
+//	bucket := "my-bucket"
+//	region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//			fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//		}
+//		return err
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// By default the request will be made to the Amazon S3 endpoint using
+// Path-style addressing.
+//
+//	s3.us-west-2.amazonaws.com/bucketname
+//
+// This is not compatible with Amazon S3's FIPS endpoints. To override this
+// behavior to use Virtual Host style addressing, provide a functional option
+// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
+//
+//	region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
+//		r.S3ForcePathStyle = aws.Bool(false)
+//	})
+//
+// To configure the GetBucketRegion to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the Config.Endpoint on the Session, or the client,
+// the utility is called with. The hint region will be ignored if an endpoint URL
+// is configured on the session or client.
+//
+//	sess, err := session.NewSession(&aws.Config{
+//		Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
+//	})
+//
+//	region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
+func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
+	var cfg aws.Config
+	if len(regionHint) != 0 {
+		cfg.Region = aws.String(regionHint)
+	}
+	svc := s3.New(c, &cfg)
+	return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
+}
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
+// that it takes a S3 service client instead of a Session. The regionHint is
+// derived from the region the S3 service client was created in.
+//
+// By default the request will be made to the Amazon S3 endpoint using
+// Path-style addressing.
+//
+//	s3.us-west-2.amazonaws.com/bucketname
+//
+// This is not compatible with Amazon S3's FIPS endpoints. To override this
+// behavior to use Virtual Host style addressing, provide a functional option
+// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
+//
+//	region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
+//		r.S3ForcePathStyle = aws.Bool(false)
+//	})
+//
+// To configure the GetBucketRegion to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the Config.Endpoint on the Session, or the client,
+// the utility is called with. The hint region will be ignored if an endpoint URL
+// is configured on the session or client.
+//
+//	region, err := s3manager.GetBucketRegionWithClient(context.Background(),
+//		s3.New(sess, &aws.Config{
+//			Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
+//		}),
+//		"bucketname")
+//
+// See GetBucketRegion for more information.
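+//
+// A common follow-up, sketched here with illustrative names, is to rebuild
+// the client in the discovered region before issuing signed requests:
+//
+//	region, err := s3manager.GetBucketRegionWithClient(ctx, svc, "bucketname")
+//	if err != nil {
+//		return err
+//	}
+//	svc = s3.New(sess, aws.NewConfig().WithRegion(region))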
+func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
+	req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
+		Bucket: aws.String(bucket),
+	})
+	req.Config.S3ForcePathStyle = aws.Bool(true)
+
+	req.Config.Credentials = credentials.AnonymousCredentials
+	req.SetContext(ctx)
+
+	// Disable HTTP redirects to prevent an invalid 301 from eating the response
+	// because Go's HTTP client will fail, and drop the response if a 301 is
+	// received without a location header. S3 will return a 301 without the
+	// location header for HeadObject API calls.
+	req.DisableFollowRedirects = true
+
+	var bucketRegion string
+	req.Handlers.Send.PushBack(func(r *request.Request) {
+		bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
+		if len(bucketRegion) == 0 {
+			return
+		}
+		r.HTTPResponse.StatusCode = 200
+		r.HTTPResponse.Status = "OK"
+		r.Error = nil
+	})
+	// Replace the endpoint validation handler to not require a region if an
+	// endpoint URL was specified. Since these requests are not authenticated,
+	// requiring a region is not needed when an endpoint URL is provided.
+	req.Handlers.Validate.Swap(
+		corehandlers.ValidateEndpointHandler.Name,
+		request.NamedHandler{
+			Name: "validateEndpointWithoutRegion",
+			Fn:   validateEndpointWithoutRegion,
+		},
+	)
+
+	req.ApplyOptions(opts...)
+
+	if err := req.Send(); err != nil {
+		return "", err
+	}
+
+	bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
+
+	return bucketRegion, nil
+}
+
+func validateEndpointWithoutRegion(r *request.Request) {
+	// Check if the caller provided an explicit URL instead of one derived by
+	// the SDK's endpoint resolver. For GetBucketRegion, with an explicit
+	// endpoint URL, a region is not needed. If no endpoint URL is provided,
+	// fall back to the SDK's standard endpoint validation handler.
+	if len(aws.StringValue(r.Config.Endpoint)) == 0 {
+		corehandlers.ValidateEndpointHandler.Fn(r)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
new file mode 100644
index 00000000000..f1d9e85c7b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
@@ -0,0 +1,81 @@
+package s3manager
+
+import (
+	"io"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker.
+type BufferedReadSeeker struct {
+	r                 io.ReadSeeker
+	buffer            []byte
+	readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker.
+// If len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+	if len(b) == 0 {
+		b = make([]byte, 64*1024)
+	}
+	return &BufferedReadSeeker{r: r, buffer: b}
+}
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+	b.r = r
+	b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If len(p) is greater than the buffer size, a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
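+//
+// (Illustrative: with the default 64 KiB buffer, a 1 MiB Read goes straight
+// to the underlying io.ReadSeeker, while a 4 KiB Read fills the buffer once
+// and then serves subsequent small Reads from it.)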
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return n, err
+	}
+
+	if b.readIdx == b.writeIdx {
+		if len(p) >= len(b.buffer) {
+			n, err = b.r.Read(p)
+			return n, err
+		}
+		b.readIdx, b.writeIdx = 0, 0
+
+		n, err = b.r.Read(b.buffer)
+		if n == 0 {
+			return n, err
+		}
+
+		b.writeIdx += n
+	}
+
+	n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+	b.readIdx += n
+
+	return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker to the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	n, err := b.r.Seek(offset, whence)
+
+	b.reset(b.r)
+
+	return n, err
+}
+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
+	_, err := b.Seek(off, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return b.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
new file mode 100644
index 00000000000..42276530a8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package s3manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
new file mode 100644
index 00000000000..687082c3066
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
@@ -0,0 +1,5 @@
+package s3manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+	return NewBufferedReadSeekerWriteToPool(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
new file mode 100644
index 00000000000..ada50c24355
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package s3manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
new file mode 100644
index 00000000000..7e9d9579f64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
@@ -0,0 +1,5 @@
+package s3manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+	return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
new file mode 100644
index 00000000000..229c0d63bda
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
@@ -0,0 +1,3 @@
+// Package s3manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful when working with large objects.
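+//
+// As a quick orientation (a sketch; session setup and names are illustrative),
+// the two main entry points share the same shape:
+//
+//	sess := session.Must(session.NewSession())
+//	uploader := s3manager.NewUploader(sess)
+//	downloader := s3manager.NewDownloader(sess)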
+package s3manager
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
new file mode 100644
index 00000000000..4b54b7c033d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -0,0 +1,597 @@
+package s3manager
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+type errReadingBody struct {
+	err error
+}
+
+func (e *errReadingBody) Error() string {
+	return fmt.Sprintf("failed to read part body: %v", e.err)
+}
+
+func (e *errReadingBody) Unwrap() error {
+	return e.err
+}
+
+// The Downloader structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Downloader's properties is not safe to be done concurrently.
+type Downloader struct {
+	// The size (in bytes) to request from S3 for each part.
+	// The minimum allowed part size is 5MB, and if this value is set to zero,
+	// the DefaultDownloadPartSize value will be used.
+	//
+	// PartSize is ignored if the Range input parameter is provided.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel when sending parts.
+	// If this is set to zero, the DefaultDownloadConcurrency value will be used.
+	//
+	// Concurrency of 1 will download the parts sequentially.
+	//
+	// Concurrency is ignored if the Range input parameter is provided.
+	Concurrency int
+
+	// An S3 client to use when performing downloads.
+	S3 s3iface.S3API
+
+	// List of request options that will be passed down to individual API
+	// operation requests made by the downloader.
+	RequestOptions []request.Option
+
+	// Defines the buffer strategy used when downloading a part.
+	//
+	// If a WriterReadFromProvider is given the Download manager
+	// will pass the io.WriterAt of the Download request to the provider
+	// and will use the returned WriterReadFrom from the provider as the
+	// destination writer when copying from http response body.
+	BufferProvider WriterReadFromProvider
+}
+
+// WithDownloaderRequestOptions appends to the Downloader's API request options.
+func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
+	return func(d *Downloader) {
+		d.RequestOptions = append(d.RequestOptions, opts...)
+	}
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// an S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
+//
+// Example:
+//	// The session the S3 Downloader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create a downloader with the session and default options
+//	downloader := s3manager.NewDownloader(sess)
+//
+//	// Create a downloader with the session and custom options
+//	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
+	return newDownloader(s3.New(c), options...)
+}
+
+func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:             client,
+		PartSize:       DefaultDownloadPartSize,
+		Concurrency:    DefaultDownloadConcurrency,
+		BufferProvider: defaultDownloadBufferProvider(),
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// NewDownloaderWithClient creates a new Downloader instance to download
+// objects from S3 in concurrent chunks. Pass in additional functional
+// options to customize the downloader behavior. Requires an S3 service client
+// to make S3 API calls.
+//
+// Example:
+//	// The session the S3 Downloader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// The S3 client the S3 Downloader will use
+//	s3Svc := s3.New(sess)
+//
+//	// Create a downloader with the s3 client and default options
+//	downloader := s3manager.NewDownloaderWithClient(s3Svc)
+//
+//	// Create a downloader with the s3 client and custom options
+//	downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
+	return newDownloader(svc, options...)
+}
+
+type maxRetrier interface {
+	MaxRetries() int
+}
+
+// Download downloads an object in S3 and writes the payload into w using
+// concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is called from.
+// Modifying the options will not impact the original Downloader instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// If the GetObjectInput's Range value is provided that will cause the downloader
+// to perform a single GetObjectInput request for that object's range. This will
+// cause the part size and concurrency configurations to be ignored.
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
+}
+
+// DownloadWithContext downloads an object in S3 and writes the payload into w
+// using concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// DownloadWithContext is the same as Download with the additional support for
+// Context input parameters. The Context must not be nil. A nil Context will
+// cause a panic. Use the Context to add deadlining, timeouts, etc. The
+// DownloadWithContext may create sub-contexts for individual underlying
+// requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided that will cause the downloader
+// to perform a single GetObjectInput request for that object's range. This will
+// cause the part size and concurrency configurations to be ignored.
+func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+	for _, option := range options {
+		option(&impl.cfg)
+	}
+	impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+	if s, ok := d.S3.(maxRetrier); ok {
+		impl.partBodyMaxRetries = s.MaxRetries()
+	}
+
+	impl.totalBytes = -1
+	if impl.cfg.Concurrency == 0 {
+		impl.cfg.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if impl.cfg.PartSize == 0 {
+		impl.cfg.PartSize = DefaultDownloadPartSize
+	}
+
+	return impl.download()
+}
+
+// DownloadWithIterator will download a batch of objects from S3 and write them
+// to the io.WriterAt specified in the iterator.
+//
+// Example:
+//	svc := s3manager.NewDownloader(session)
+//
+//	fooFile, err := os.Create("/tmp/foo.file")
+//	if err != nil {
+//		return err
+//	}
+//
+//	barFile, err := os.Create("/tmp/bar.file")
+//	if err != nil {
+//		return err
+//	}
+//
+//	objects := []s3manager.BatchDownloadObject{
+//		{
+//			Object: &s3.GetObjectInput{
+//				Bucket: aws.String("bucket"),
+//				Key:    aws.String("foo"),
+//			},
+//			Writer: fooFile,
+//		},
+//		{
+//			Object: &s3.GetObjectInput{
+//				Bucket: aws.String("bucket"),
+//				Key:    aws.String("bar"),
+//			},
+//			Writer: barFile,
+//		},
+//	}
+//
+//	iter := &s3manager.DownloadObjectsIterator{Objects: objects}
+//	if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
+//		return err
+//	}
+func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
+	var errs []Error
+	for iter.Next() {
+		object := iter.DownloadObject()
+		if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
+			errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+		}
+
+		if object.After == nil {
+			continue
+		}
+
+		if err := object.After(); err != nil {
+			errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+		}
+	}
+
+	if len(errs) > 0 {
+		return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
+	}
+	return nil
+}
+
+// downloader is the implementation structure used internally by Downloader.
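+//
+// (Illustrative note: a caller-specified Range short-circuits the chunked
+// path entirely; e.g. with hypothetical values,
+//
+//	n, err := downloader.Download(w, &s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//		Range:  aws.String("bytes=0-1048575"), // first MiB only
+//	})
+//
+// issues a single GET for just that range.)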
+type downloader struct {
+	ctx aws.Context
+	cfg Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+
+	partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If range is specified fall back to single download of that range
+	// this enables the functionality of ranged gets with the downloader but
+	// at the cost of no multipart downloads.
+	if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
+		d.downloadRange(rng)
+		return d.written, d.err
+	}
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Spin up workers
+		ch := make(chan dlchunk, d.cfg.Concurrency)
+
+		for i := 0; i < d.cfg.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queuing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+			d.pos += d.cfg.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Checking if we read anything new
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur.
+		e, ok := d.err.(awserr.RequestFailure)
+		if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
+			d.err = nil
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		if d.getErr() != nil {
+			// Drain the channel if there is an error, to prevent deadlocking
+			// of download producer.
+			continue
+		}
+
+		if err := d.downloadChunk(chunk); err != nil {
+			d.setErr(err)
+		}
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+	d.pos += d.cfg.PartSize
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos}
+	// Ranges specified will short circuit the multipart download
+	chunk.withRange = rng
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+
+	// Update the position based on the amount of data received.
+ d.pos = d.written +} + +// downloadChunk downloads the chunk from s3 +func (d *downloader) downloadChunk(chunk dlchunk) error { + in := &s3.GetObjectInput{} + awsutil.Copy(in, d.in) + + // Get the next byte range of data + in.Range = aws.String(chunk.ByteRange()) + + var n int64 + var err error + for retry := 0; retry <= d.partBodyMaxRetries; retry++ { + n, err = d.tryDownloadChunk(in, &chunk) + if err == nil { + break + } + // Check if the returned error is an errReadingBody. + // If err is errReadingBody this indicates that an error + // occurred while copying the http response body. + // If this occurs we unwrap the err to set the underlying error + // and attempt any remaining retries. + if bodyErr, ok := err.(*errReadingBody); ok { + err = bodyErr.Unwrap() + } else { + return err + } + + chunk.cur = 0 + logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries, + fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d", + aws.StringValue(in.Key), err, retry)) + } + + d.incrWritten(n) + + return err +} + +func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) { + cleanup := func() {} + if d.cfg.BufferProvider != nil { + w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) + } + defer cleanup() + + resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) + if err != nil { + return 0, err + } + d.setTotalBytes(resp) // Set total if not yet set. + + n, err := io.Copy(w, resp.Body) + resp.Body.Close() + if err != nil { + return n, &errReadingBody{err: err} + } + + return n, nil +} + +func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { + s, ok := svc.(*s3.S3) + if !ok { + return + } + + if s.Config.Logger == nil { + return + } + + if s.Config.LogLevel.Matches(level) { + s.Config.Logger.Log(msg) + } +} + +// getTotalBytes is a thread-safe getter for retrieving the total byte status. +func (d *downloader) getTotalBytes() int64 { + d.m.Lock() + defer d.m.Unlock() + + return d.totalBytes +} + +// setTotalBytes is a thread-safe setter for setting the total byte status. +// Will extract the object's total bytes from the Content-Range if the file +// will be chunked, or Content-Length. Content-Length is used when the response +// does not include a Content-Range. Meaning the object was not chunked. This +// occurs when the full file fits within the PartSize directive. +func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { + d.m.Lock() + defer d.m.Unlock() + + if d.totalBytes >= 0 { + return + } + + if resp.ContentRange == nil { + // ContentRange is nil when the full file contents is provided, and + // is not chunked. Use ContentLength instead. 
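+		// (Illustrative: a chunked response carries a header such as
+		// "Content-Range: bytes 0-5242879/12582912", where the trailing total
+		// is parsed below; a total of "*" leaves totalBytes undefined at -1.)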
+		if resp.ContentLength != nil {
+			d.totalBytes = *resp.ContentLength
+			return
+		}
+	} else {
+		parts := strings.Split(*resp.ContentRange, "/")
+
+		total := int64(-1)
+		var err error
+		// Checking for whether or not a numbered total exists
+		// If one does not exist, we will assume the total to be -1, undefined,
+		// and sequentially download each chunk until hitting a 416 error
+		totalStr := parts[len(parts)-1]
+		if totalStr != "*" {
+			total, err = strconv.ParseInt(totalStr, 10, 64)
+			if err != nil {
+				d.err = err
+				return
+			}
+		}
+
+		d.totalBytes = total
+	}
+}
+
+func (d *downloader) incrWritten(n int64) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+	w     io.WriterAt
+	start int64
+	size  int64
+	cur   int64
+
+	// specifies the byte range the chunk should be downloaded with.
+	withRange string
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+//
+// If a range is specified on the dlchunk the size will be ignored when writing,
+// as the total size may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+	if c.cur >= c.size && len(c.withRange) == 0 {
+		return 0, io.EOF
+	}
+
+	n, err = c.w.WriteAt(p, c.start+c.cur)
+	c.cur += int64(n)
+
+	return
+}
+
+// ByteRange returns an HTTP Byte-Range header value that should be used by the
+// client to request the chunk's range.
+func (c *dlchunk) ByteRange() string {
+	if len(c.withRange) != 0 {
+		return c.withRange
+	}
+
+	return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go
new file mode 100644
index 00000000000..f6f27fc48a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go
@@ -0,0 +1,252 @@
+package s3manager
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+type byteSlicePool interface {
+	Get(aws.Context) (*[]byte, error)
+	Put(*[]byte)
+	ModifyCapacity(int)
+	SliceSize() int64
+	Close()
+}
+
+type maxSlicePool struct {
+	// allocator is defined as a function pointer to allow
+	// for test cases to instrument custom tracers when allocations
+	// occur.
+	allocator sliceAllocator
+
+	slices         chan *[]byte
+	allocations    chan struct{}
+	capacityChange chan struct{}
+
+	max       int
+	sliceSize int64
+
+	mtx sync.RWMutex
+}
+
+func newMaxSlicePool(sliceSize int64) *maxSlicePool {
+	p := &maxSlicePool{sliceSize: sliceSize}
+	p.allocator = p.newSlice
+
+	return p
+}
+
+var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
+
+func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
+	// check if context is canceled before attempting to get a slice
+	// this ensures priority is given to the cancel case first
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	p.mtx.RLock()
+
+	for {
+		select {
+		case bs, ok := <-p.slices:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return bs, nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// pass
+		}
+
+		select {
+		case _, ok := <-p.allocations:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return p.allocator(), nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// In the event that there are no slices or allocations available
+			// This prevents some deadlock situations that can occur around sync.RWMutex
+			// When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock.
+			// By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where
+			// Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock,
+			// and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity.
+
+			// Short-circuit if the pool capacity is zero.
+			if p.max == 0 {
+				p.mtx.RUnlock()
+				return nil, errZeroCapacity
+			}
+
+			// Since we will be releasing the read-lock we need to take the reference to the channel.
+			// Since channels are references we will still get notified if slices are added, or if
+			// the channel is closed due to a capacity modification. This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case <-c:
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the channel is full when attempting to add the slice then we drop the slice.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+		// Allows us to reap allocations that are returned and are no longer needed.
+	}
+}
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+		}
+	}
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+	}
+}
+
+func (p *maxSlicePool) SliceSize() int64 {
+	return p.sliceSize
+}
+
+func (p *maxSlicePool) Close() {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	p.empty()
+}
+
+func (p *maxSlicePool) empty() {
+	p.max = 0
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+		p.capacityChange = nil
+	}
+
+	if p.allocations != nil {
+		close(p.allocations)
+		for range p.allocations {
+			// drain channel
+		}
+		p.allocations = nil
+	}
+
+	if p.slices != nil {
+		close(p.slices)
+		for range p.slices {
+			// drain channel
+		}
+		p.slices = nil
+	}
+}
+
+func (p *maxSlicePool) newSlice() *[]byte {
+	bs := make([]byte, p.sliceSize)
+	return &bs
+}
+
+type returnCapacityPoolCloser struct {
+	byteSlicePool
+	returnCapacity int
+}
+
+func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
+	if delta > 0 {
+		n.returnCapacity = -1 * delta
+	}
+	n.byteSlicePool.ModifyCapacity(delta)
+}
+
+func (n *returnCapacityPoolCloser) Close() {
+	if n.returnCapacity < 0 {
+		n.byteSlicePool.ModifyCapacity(n.returnCapacity)
+	}
+}
+
+type sliceAllocator func() *[]byte
+
+var newByteSlicePool = func(sliceSize int64) byteSlicePool {
+	return newMaxSlicePool(sliceSize)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
new file mode 100644
index 00000000000..f62e1a45eef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
@@ -0,0 +1,65 @@
+package s3manager
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker
+type ReadSeekerWriteTo interface {
+	io.ReadSeeker
+	io.WriterTo
+}
+
+// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
+// implementation.
+type BufferedReadSeekerWriteTo struct {
+	*BufferedReadSeeker
+}
+
+// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
+// an error occurs. Returns the number of bytes written and any error encountered during the write.
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+	return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriterTo for an io.ReadSeeker
+type ReadSeekerWriteToProvider interface {
+	GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory
+type BufferedReadSeekerWriteToPool struct {
+	pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
+// a pool of reusable buffers. If size is less than 64 KiB then the buffer
+// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
+// respectively will default to copying 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+	if size < 65536 {
+		size = 65536
+	}
+
+	return &BufferedReadSeekerWriteToPool{
+		pool: sync.Pool{New: func() interface{} {
+			return make([]byte, size)
+		}},
+	}
+}
+
+// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
+// The provided cleanup must be called after operations have been completed on the
+// returned ReadSeekerWriteTo in order to signal the return of resources to the pool.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+	buffer := p.pool.Get().([]byte)
+
+	r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
+	cleanup = func() {
+		p.pool.Put(buffer)
+	}
+
+	return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
new file mode 100644
index 00000000000..7dba8347bc9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -0,0 +1,782 @@
+package s3manager
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sort"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
+// on Amazon S3.
+const MaxUploadParts = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multi part upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
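+//
+// (Sketch, names illustrative: with the upload ID in hand, leftover parts can
+// be aborted manually:
+//
+//	svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//		Bucket:   aws.String("bucket"),
+//		Key:      aws.String("key"),
+//		UploadId: aws.String(uploadID),
+//	})
+//
+// where uploadID came from MultiUploadFailure.UploadID.)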
+//
+// Example:
+//
+//	u := s3manager.NewUploader(opts)
+//	output, err := u.Upload(input)
+//	if err != nil {
+//		if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
+//			// Process error and its associated uploadID
+//			fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
+//		} else {
+//			// Process error generically
+//			fmt.Println("Error:", err.Error())
+//		}
+//	}
+type MultiUploadFailure interface {
+	awserr.Error
+
+	// Returns the upload id for the S3 multipart upload that failed.
+	UploadID() string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the multiUploadError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error.
+//
+// Should be used for an error that occurred failing an S3 multipart upload,
+// when an upload ID is available. If an upload ID is not available, a more
+// relevant error type should be returned instead.
+type multiUploadError struct {
+	awsError
+
+	// ID for multipart upload which failed.
+	uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// See apierr.BaseError ErrorWithExtra for output format
+//
+// Satisfies the error interface.
+func (m multiUploadError) Error() string {
+	extra := fmt.Sprintf("upload id: %s", m.uploadID)
+	return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (m multiUploadError) String() string {
+	return m.Error()
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m multiUploadError) UploadID() string {
+	return m.uploadID
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+	// The URL where the object was uploaded to.
+	Location string
+
+	// The version of the object that was uploaded. Will only be populated if
+	// the S3 Bucket is versioned; if the bucket is not versioned this field
+	// will not be set.
+	VersionID *string
+
+	// The ID for a multipart upload to S3. In the case of an error the error
+	// can be cast to the MultiUploadFailure interface to extract the upload ID.
+	UploadID string
+
+	// Entity tag of the object.
+	ETag *string
+}
+
+// WithUploaderRequestOptions appends to the Uploader's API request options.
+func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
+	return func(u *Uploader) {
+		u.RequestOptions = append(u.RequestOptions, opts...)
+	}
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Uploader's properties is not safe to be done concurrently.
+type Uploader struct {
+	// The buffer size (in bytes) to use when buffering data into chunks and
+	// sending them as parts to S3. The minimum allowed part size is 5MB, and
+	// if this value is set to zero, the DefaultUploadPartSize value will be used.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel per call to Upload when
+	// sending parts. If this is set to zero, the DefaultUploadConcurrency value
+	// will be used.
+	//
+	// The concurrency pool is not shared between calls to Upload.
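+	//
+	// (Illustrative: with the default 5 MiB PartSize and Concurrency of 5,
+	// roughly 25 MiB of part buffers may be in flight per Upload call.)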
+	Concurrency int
+
+	// Setting this value to true will cause the SDK to avoid calling
+	// AbortMultipartUpload on a failure, leaving all successfully uploaded
+	// parts on S3 for manual recovery.
+	//
+	// Note that storing parts of an incomplete multipart upload counts towards
+	// space usage on S3 and will add additional costs if not cleaned up.
+	LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// It is used to calculate the part size of the object to be uploaded.
+	// E.g., a 5GB file with MaxUploadParts set to 100 will be uploaded as
+	// 100 parts of 50MB each, subject to the limit of MaxUploadParts
+	// (10,000 parts).
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitReader is helpful when uploading an unbounded reader
+	// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
+	// error must be used to signal end of stream.
+	//
+	// Defaults to the package const MaxUploadParts value.
+	MaxUploadParts int
+
+	// The client to use when uploading to S3.
+	S3 s3iface.S3API
+
+	// List of request options that will be passed down to individual API
+	// operation requests made by the uploader.
+	RequestOptions []request.Option
+
+	// Defines the buffer strategy used when uploading a part.
+	BufferProvider ReadSeekerWriteToProvider
+
+	// partPool allows for the reuse of streaming payload part buffers between upload calls.
+	partPool byteSlicePool
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires a
+// client.ConfigProvider in order to create an S3 service client. The session.Session
+// satisfies the client.ConfigProvider interface.
+//
+// Example:
+//     // The session the S3 Uploader will use
+//     sess := session.Must(session.NewSession())
+//
+//     // Create an uploader with the session and default options
+//     uploader := s3manager.NewUploader(sess)
+//
+//     // Create an uploader with the session and custom options
+//     uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+//          u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//     })
+func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+	return newUploader(s3.New(c), options...)
+}
+
+func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                client,
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+		BufferProvider:    defaultUploadBufferProvider(),
+	}
+
+	for _, option := range options {
+		option(u)
+	}
+
+	u.partPool = newByteSlicePool(u.PartSize)
+
+	return u
+}
+
+// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// an S3 service client to make S3 API calls.
+//
+// Example:
+//     // The session the S3 Uploader will use
+//     sess := session.Must(session.NewSession())
+//
+//     // S3 service client the Upload manager will use.
+//     s3Svc := s3.New(sess)
+//
+//     // Create an uploader with S3 client and default options
+//     uploader := s3manager.NewUploaderWithClient(s3Svc)
+//
+//     // Create an uploader with S3 client and custom options
+//     uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
+//          u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//     })
+func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
+	return newUploader(svc, options...)
+}
+
+// Upload uploads an object to S3, intelligently buffering large files into
+// smaller chunks and sending them in parallel across multiple goroutines. You
+// can configure the buffer size and concurrency through the Uploader's parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// Example:
+//     // Upload input parameters
+//     upParams := &s3manager.UploadInput{
+//         Bucket: &bucketName,
+//         Key:    &keyName,
+//         Body:   file,
+//     }
+//
+//     // Perform an upload.
+//     result, err := uploader.Upload(upParams)
+//
+//     // Perform upload with options different from those in the Uploader.
+//     result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
+//          u.PartSize = 10 * 1024 * 1024 // 10MB part size
+//          u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
+//     })
+func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
+	return u.UploadWithContext(aws.BackgroundContext(), input, options...)
+}
+
+// UploadWithContext uploads an object to S3, intelligently buffering large
+// files into smaller chunks and sending them in parallel across multiple
+// goroutines. You can configure the buffer size and concurrency through the
+// Uploader's parameters.
+//
+// UploadWithContext is the same as Upload with the additional support for
+// Context input parameters. The Context must not be nil. A nil Context will
+// cause a panic. Use the context to add deadlines, timeouts, etc. The
+// UploadWithContext may create sub-contexts for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
+	i := uploader{in: input, cfg: u, ctx: ctx}
+
+	for _, opt := range opts {
+		opt(&i.cfg)
+	}
+
+	i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+	return i.upload()
+}
+
+// UploadWithIterator uploads a batch of objects to S3. This operation uses
+// the iterator pattern to know which object to upload next. Since the iterator
+// is an interface, custom behavior can be supplied.
+//
+// Example:
+//     svc := s3manager.NewUploader(sess)
+//
+//     objects := []BatchUploadObject{
+//         {
+//             Object: &s3manager.UploadInput{
+//                 Key:    aws.String("key"),
+//                 Bucket: aws.String("bucket"),
+//             },
+//         },
+//     }
+//
+//     iter := &s3manager.UploadObjectsIterator{Objects: objects}
+//     if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
+//         return err
+//     }
+func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
+	var errs []Error
+	for iter.Next() {
+		object := iter.UploadObject()
+		if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
+			s3Err := Error{
+				OrigErr: err,
+				Bucket:  object.Object.Bucket,
+				Key:     object.Object.Key,
+			}
+
+			errs = append(errs, s3Err)
+		}
+
+		if object.After == nil {
+			continue
+		}
+
+		if err := object.After(); err != nil {
+			s3Err := Error{
+				OrigErr: err,
+				Bucket:  object.Object.Bucket,
+				Key:     object.Object.Key,
+			}
+
+			errs = append(errs, s3Err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
+	}
+	return nil
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+	ctx aws.Context
+	cfg Uploader
+
+	in *UploadInput
+
+	readerPos int64 // current reader position
+	totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+	if err := u.init(); err != nil {
+		return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
+	}
+	defer u.cfg.partPool.Close()
+
+	if u.cfg.PartSize < MinUploadPartSize {
+		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
+		return nil, awserr.New("ConfigError", msg, nil)
+	}
+
+	// Do one read to determine if we have more than one part
+	reader, _, cleanup, err := u.nextReader()
+	if err == io.EOF { // single part
+		return u.singlePart(reader, cleanup)
+	} else if err != nil {
+		cleanup()
+		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
+	}
+
+	mu := multiuploader{uploader: u}
+	return mu.upload(reader, cleanup)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() error {
+	if u.cfg.Concurrency == 0 {
+		u.cfg.Concurrency = DefaultUploadConcurrency
+	}
+	if u.cfg.PartSize == 0 {
+		u.cfg.PartSize = DefaultUploadPartSize
+	}
+	if u.cfg.MaxUploadParts == 0 {
+		u.cfg.MaxUploadParts = MaxUploadParts
+	}
+
+	// Try to get the total size for some optimizations
+	if err := u.initSize(); err != nil {
+		return err
+	}
+
+	// If PartSize was changed or partPool was never set up, we need to allocate a new pool
+	// so that we return []byte slices of the correct size
+	poolCap := u.cfg.Concurrency + 1
+	if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
+		u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	} else {
+		u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	}
+
+	return nil
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
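+//
+// Illustrative numbers (not from the SDK docs): a 100 GiB seekable body with
+// the default 5 MiB PartSize would need 20480 parts, which exceeds
+// MaxUploadParts (10000); initSize therefore raises PartSize to
+// totalSize/MaxUploadParts + 1, roughly 10.24 MiB here, so the upload fits
+// within the part limit.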
+func (u *uploader) initSize() error {
+	u.totalSize = -1
+
+	switch r := u.in.Body.(type) {
+	case io.Seeker:
+		n, err := aws.SeekerLen(r)
+		if err != nil {
+			return err
+		}
+		u.totalSize = n
+
+		// Try to adjust partSize if it is too small and account for
+		// integer division truncation.
+		if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
+			// Add one to the part size to account for remainders
+			// during the size calculation. e.g. an odd number of bytes.
+			u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
+		}
+	}
+
+	return nil
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
+	switch r := u.in.Body.(type) {
+	case readerAtSeeker:
+		var err error
+
+		n := u.cfg.PartSize
+		if u.totalSize >= 0 {
+			bytesLeft := u.totalSize - u.readerPos
+
+			if bytesLeft <= u.cfg.PartSize {
+				err = io.EOF
+				n = bytesLeft
+			}
+		}
+
+		var (
+			reader  io.ReadSeeker
+			cleanup func()
+		)
+
+		reader = io.NewSectionReader(r, u.readerPos, n)
+		if u.cfg.BufferProvider != nil {
+			reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
+		} else {
+			cleanup = func() {}
+		}
+
+		u.readerPos += n
+
+		return reader, int(n), cleanup, err
+
+	default:
+		part, err := u.cfg.partPool.Get(u.ctx)
+		if err != nil {
+			return nil, 0, func() {}, err
+		}
+
+		n, err := readFillBuf(r, *part)
+		u.readerPos += int64(n)
+
+		cleanup := func() {
+			u.cfg.partPool.Put(part)
+		}
+
+		return bytes.NewReader((*part)[0:n]), n, cleanup, err
+	}
+}
+
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+	for offset < len(b) && err == nil {
+		var n int
+		n, err = r.Read(b[offset:])
+		offset += n
+	}
+
+	return offset, err
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+	defer cleanup()
+
+	params := &s3.PutObjectInput{}
+	awsutil.Copy(params, u.in)
+	params.Body = r
+
+	// Need to use request form because URL generated in request is
+	// used in return.
+	req, out := u.cfg.S3.PutObjectRequest(params)
+	req.SetContext(u.ctx)
+	req.ApplyOptions(u.cfg.RequestOptions...)
+	if err := req.Send(); err != nil {
+		return nil, err
+	}
+
+	url := req.HTTPRequest.URL.String()
+	return &UploadOutput{
+		Location:  url,
+		VersionID: out.VersionId,
+		ETag:      out.ETag,
+	}, nil
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+	*uploader
+	wg       sync.WaitGroup
+	m        sync.Mutex
+	err      error
+	uploadID string
+	parts    completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+	buf     io.ReadSeeker
+	num     int64
+	cleanup func()
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
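+//
+// The flow, as implemented below: CreateMultipartUpload is called first, then
+// cfg.Concurrency worker goroutines drain a chunk channel via readChunk while
+// the main goroutine feeds numbered chunks from nextReader; finally the
+// channel is closed, the workers are awaited, and the upload is completed
+// (or aborted on error).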
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+	params := &s3.CreateMultipartUploadInput{}
+	awsutil.Copy(params, u.in)
+
+	// Create the multipart
+	resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+	if err != nil {
+		cleanup()
+		return nil, err
+	}
+	u.uploadID = *resp.UploadId
+
+	// Create the workers
+	ch := make(chan chunk, u.cfg.Concurrency)
+	for i := 0; i < u.cfg.Concurrency; i++ {
+		u.wg.Add(1)
+		go u.readChunk(ch)
+	}
+
+	// Send part 1 to the workers
+	var num int64 = 1
+	ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
+
+	// Read and queue the rest of the parts
+	for u.geterr() == nil && err == nil {
+		var (
+			reader       io.ReadSeeker
+			nextChunkLen int
+			ok           bool
+		)
+
+		reader, nextChunkLen, cleanup, err = u.nextReader()
+		ok, err = u.shouldContinue(num, nextChunkLen, err)
+		if !ok {
+			cleanup()
+			if err != nil {
+				u.seterr(err)
+			}
+			break
+		}
+
+		num++
+
+		ch <- chunk{buf: reader, num: num, cleanup: cleanup}
+	}
+
+	// Close the channel, wait for workers, and complete upload
+	close(ch)
+	u.wg.Wait()
+	complete := u.complete()
+
+	if err := u.geterr(); err != nil {
+		return nil, &multiUploadError{
+			awsError: awserr.New(
+				"MultipartUpload",
+				"upload multipart failed",
+				err),
+			uploadID: u.uploadID,
+		}
+	}
+
+	// Create a presigned URL of the S3 Get Object in order to have parity with
+	// single part upload.
+	getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
+		Bucket: u.in.Bucket,
+		Key:    u.in.Key,
+	})
+	getReq.Config.Credentials = credentials.AnonymousCredentials
+	getReq.SetContext(u.ctx)
+	uploadLocation, _, _ := getReq.PresignRequest(1)
+
+	return &UploadOutput{
+		Location:  uploadLocation,
+		VersionID: complete.VersionId,
+		UploadID:  u.uploadID,
+		ETag:      complete.ETag,
+	}, nil
+}
+
+func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
+	if err != nil && err != io.EOF {
+		return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
+	}
+
+	if nextChunkLen == 0 {
+		// No need to upload an empty part. If the file was empty to start
+		// with, an empty single part would have been created and the
+		// multipart upload never started.
+		return false, nil
+	}
+
+	part++
+	// This upload exceeded the maximum number of supported parts; error now.
+	if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
+		var msg string
+		if part > int64(u.cfg.MaxUploadParts) {
+			msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				u.cfg.MaxUploadParts)
+		} else {
+			msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				MaxUploadParts)
+		}
+		return false, awserr.New("TotalPartsExceeded", msg, nil)
+	}
+
+	return true, err
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+	defer u.wg.Done()
+	for {
+		data, ok := <-ch
+
+		if !ok {
+			break
+		}
+
+		if u.geterr() == nil {
+			if err := u.send(data); err != nil {
+				u.seterr(err)
+			}
+		}
+
+		data.cleanup()
+	}
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
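+//
+// Workers call send concurrently, so completed parts are recorded under the
+// u.m mutex; complete() sorts them by part number before the
+// CompleteMultipartUpload call, as S3 requires.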
+func (u *multiuploader) send(c chunk) error {
+	params := &s3.UploadPartInput{
+		Bucket:               u.in.Bucket,
+		Key:                  u.in.Key,
+		Body:                 c.buf,
+		UploadId:             &u.uploadID,
+		SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
+		SSECustomerKey:       u.in.SSECustomerKey,
+		PartNumber:           &c.num,
+	}
+
+	resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
+	if err != nil {
+		return err
+	}
+
+	n := c.num
+	completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
+
+	u.m.Lock()
+	u.parts = append(u.parts, completed)
+	u.m.Unlock()
+
+	return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	u.err = e
+}
+
+// fail will abort the multipart upload unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+	if u.cfg.LeavePartsOnError {
+		return
+	}
+
+	params := &s3.AbortMultipartUploadInput{
+		Bucket:   u.in.Bucket,
+		Key:      u.in.Key,
+		UploadId: &u.uploadID,
+	}
+	_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+	if err != nil {
+		logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
+	}
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+	if u.geterr() != nil {
+		u.fail()
+		return nil
+	}
+
+	// Parts must be sorted in PartNumber order.
+	sort.Sort(u.parts)
+
+	params := &s3.CompleteMultipartUploadInput{
+		Bucket:          u.in.Bucket,
+		Key:             u.in.Key,
+		UploadId:        &u.uploadID,
+		MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
+	}
+	resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+	if err != nil {
+		u.seterr(err)
+		u.fail()
+	}
+
+	return resp
+}
+
+type readerAtSeeker interface {
+	io.ReaderAt
+	io.ReadSeeker
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
new file mode 100644
index 00000000000..6cac26fa83d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
@@ -0,0 +1,207 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3manager
+
+import (
+	"io"
+	"time"
+)
+
+// UploadInput provides the input parameters for uploading a stream or buffer
+// to an object in an Amazon S3 bucket. This type is similar to the s3
+// package's PutObjectInput with the exception that the Body member is an
+// io.Reader instead of an io.ReadSeeker.
+type UploadInput struct {
+	_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
+
+	// The canned ACL to apply to the object. For more information, see Canned ACL
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	// The readable body payload to send to S3.
+	Body io.Reader
+
+	// The bucket name to which the PUT operation was initiated.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation with an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this operation with S3 on Outposts through the AWS SDKs, you provide
+	// the Outposts bucket ARN in place of the bucket name. For more information
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+	// with server-side encryption using AWS KMS (SSE-KMS). Setting this header
+	// to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
+	// SSE-KMS.
+	//
+	// Specifying this header with a PUT operation doesn’t affect bucket-level
+	// settings for S3 Bucket Key.
+	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+	// Can be used to specify caching behavior along the request/reply chain. For
+	// more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object. For more information,
+	// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// The base64-encoded 128-bit MD5 digest of the message (without the headers)
+	// according to RFC 1864. This header can be used as a message integrity check
+	// to verify that the data is the same data that was originally sent. Although
+	// it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
+	// integrity check. For more information about REST request authentication,
+	// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+	// A standard MIME type describing the format of the contents. For more information,
+	// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The account id of the expected bucket owner. If the bucket is owned by a
+	// different account, the request will fail with an HTTP 403 (Access Denied)
+	// error.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The date and time at which the object is no longer cacheable. For more information,
+	// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	//
+	// This action is not supported by Amazon S3 on Outposts.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Object key for which the PUT operation was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Specifies whether a legal hold will be applied to this object. For more information
+	// about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// The Object Lock mode that you want to apply to this object.
+	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when you want this object's Object Lock to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS Encryption Context to use for object encryption. The
+	// value of this header is a base64-encoded UTF-8 string holding JSON with the
+	// encryption context key-value pairs.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+	// If x-amz-server-side-encryption is present and has the value of aws:kms,
+	// this header specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetric customer managed customer master key (CMK) that was used for
+	// the object.
+	//
+	// If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+	// the ID of the symmetric customer managed AWS KMS CMK that will be used for
+	// the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+	// provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+	// managed CMK in AWS to protect the data.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+	// objects. The STANDARD storage class provides high durability and high availability.
+	// Depending on performance needs, you can specify a different Storage Class.
+	// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+	// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+	// in the Amazon S3 Service Developer Guide.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+	// (For example, "Key1=Value1")
+	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata. For information about object
+	// metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go new file mode 100644 index 00000000000..765dc07ca32 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go @@ -0,0 +1,75 @@ +package s3manager + +import ( + "bufio" + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom +type WriterReadFrom interface { + io.Writer + io.ReaderFrom +} + +// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer +type WriterReadFromProvider interface { + GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) +} + +type bufferedWriter interface { + WriterReadFrom + Flush() error + Reset(io.Writer) +} + +type bufferedReadFrom struct { + bufferedWriter +} + +func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := b.bufferedWriter.ReadFrom(r) + if flushErr := b.Flush(); flushErr != nil && err == nil { + err = flushErr + } + return n, err +} + +// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool +// to manage allocation and reuse of *bufio.Writer structures. +type PooledBufferedReadFromProvider struct { + pool sync.Pool +} + +// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider +// Size is used to control the size of the underlying *bufio.Writer created for +// calls to GetReadFrom. 
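+//
+// Illustrative usage (dst and src are placeholders, not SDK API):
+//
+//     provider := s3manager.NewPooledBufferedWriterReadFromProvider(1 << 20) // 1 MiB writers
+//     w, cleanup := provider.GetReadFrom(dst)
+//     _, err := w.ReadFrom(src)
+//     cleanup() // return the buffered writer to the pool, after checking err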
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
+	if size < int(32*sdkio.KibiByte) {
+		size = int(64 * sdkio.KibiByte)
+	}
+
+	return &PooledBufferedReadFromProvider{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
+			},
+		},
+	}
+}
+
+// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
+// interface. Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom
+// has been completed in order to allow the reuse of the *bufio.Writer
+func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
+	buffer := p.pool.Get().(*bufferedReadFrom)
+	buffer.Reset(writer)
+	r = buffer
+	cleanup = func() {
+		buffer.Reset(nil) // Reset to nil writer to release reference
+		p.pool.Put(buffer)
+	}
+	return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 00000000000..1b78b5d45e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "s3"        // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create a S3 client from just a session.
+//     svc := s3.New(mySession)
+//
+//     // Create a S3 client with additional configuration
+//     svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "s3"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
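+//
+// It wires up the S3-specific handler pipeline visible below: a SigV4 signer
+// with URI path escaping disabled (S3 object keys must not be re-escaped
+// during signing), the restxml build/unmarshal handlers, and the optional
+// initClient hook.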
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 {
+	svc := &S3{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
+				Endpoint:      endpoint,
+				APIVersion:    "2006-03-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
+		s.DisableURIPathEscaping = true
+	}))
+	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+	svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 00000000000..57a0bd92ca3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,84 @@
+package s3
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+	if r.HTTPRequest.URL.Scheme == "https" {
+		return
+	}
+
+	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+		if len(iface.getSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+
+	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		if len(iface.getCopySourceSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+}
+
+const (
+	sseKeyHeader    = "x-amz-server-side-encryption-customer-key"
+	sseKeyMD5Header = sseKeyHeader + "-md5"
+)
+
+func computeSSEKeyMD5(r *request.Request) {
+	var key string
+	if g, ok := r.Params.(sseCustomerKeyGetter); ok {
+		key = g.getSSECustomerKey()
+	}
+
+	computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
+}
+
+const (
+	copySrcSSEKeyHeader    = "x-amz-copy-source-server-side-encryption-customer-key"
+	copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
+)
+
+func computeCopySourceSSEKeyMD5(r *request.Request) {
+	var key string
+	if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		key = g.getCopySourceSSECustomerKey()
+	}
+
+	computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
+}
+
+func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
+	if len(key) == 0 {
+		// Backwards compatibility where the user just set the header value instead
+		// of using the API parameter, or set the header value for an
+		// operation without the parameter modeled.
+		key = r.Header.Get(keyHeader)
+		if len(key) == 0 {
+			return
+		}
+
+		// In the backwards compatible case, the header's value is not base64 encoded,
+		// and needs to be encoded and updated by the SDK's customizations.
+		b64Key := base64.StdEncoding.EncodeToString([]byte(key))
+		r.Header.Set(keyHeader, b64Key)
+	}
+
+	// Only update the key's MD5 if not already set.
+	if len(r.Header.Get(keyMD5Header)) == 0 {
+		sum := md5.Sum([]byte(key))
+		keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+		r.Header.Set(keyMD5Header, keyMD5)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100644
index 00000000000..247770e4c88
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,42 @@
+package s3
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
+	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+	body := bytes.NewReader(b)
+	r.HTTPResponse.Body = ioutil.NopCloser(body)
+	defer body.Seek(0, sdkio.SeekStart)
+
+	unmarshalError(r)
+	if err, ok := r.Error.(awserr.Error); ok && err != nil {
+		if err.Code() == request.ErrCodeSerialization &&
+			err.OrigErr() != io.EOF {
+			r.Error = nil
+			return
+		}
+		// if empty payload
+		if err.OrigErr() == io.EOF {
+			r.HTTPResponse.StatusCode = http.StatusInternalServerError
+		} else {
+			r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 00000000000..6eecf669107
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,114 @@
+package s3
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+type xmlErrorResponse struct {
+	XMLName xml.Name `xml:"Error"`
+	Code    string   `xml:"Code"`
+	Message string   `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+	// Bucket exists in a different region, and request needs
+	// to be made to the correct region.
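+	// S3 reports this with a 301 status and, when available, an
+	// x-amz-bucket-region header naming the bucket's actual region; the
+	// header value is appended to the error message below.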
+	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+		msg := fmt.Sprintf(
+			"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+			aws.StringValue(r.Config.Region),
+			aws.StringValue(r.Config.Endpoint),
+		)
+		if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+			msg += fmt.Sprintf(", bucket is in '%s' region", v)
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("BucketRegionError", msg, nil),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Attempt to parse error from body if it is known
+	var errResp xmlErrorResponse
+	var err error
+	if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
+		err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
+	} else {
+		err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+	}
+
+	if err != nil {
+		var errorMsg string
+		if err == io.EOF {
+			errorMsg = "empty response payload"
+		} else {
+			errorMsg = "failed to unmarshal error message"
+		}
+
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				errorMsg, err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Fallback to status code converted to message if still no error code
+	if len(errResp.Code) == 0 {
+		statusText := http.StatusText(r.HTTPResponse.StatusCode)
+		errResp.Code = strings.Replace(statusText, " ", "", -1)
+		errResp.Message = statusText
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(errResp.Code, errResp.Message, err),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+	awserr.RequestFailure
+
+	// Host ID is the S3 Host ID needed for debugging and contacting support
+	HostID() string
+}
+
+// s3unmarshalXMLError is the S3-specific XML error unmarshaler
+// for 200 OK errors and response payloads.
+// This function differs from the xmlUtil.UnmarshalXMLError
+// func. It does not ignore the EOF error and passes it up.
+// Related to bug fix for `s3 200 OK response with empty payload`
+func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := xml.NewDecoder(body).Decode(v)
+	if err != nil && err != io.EOF {
+		return awserr.NewUnmarshalError(err,
+			"failed to unmarshal error message", errBuf.Bytes())
+	}
+
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 00000000000..2596c694b50
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 00000000000..4498f285e47 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. +// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the AWS SSO User Guide. This operation returns a paginated response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
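+//
+//    // Editorial sketch, not part of the generated file: collecting every
+//    // account across pages. Assumes "svc" is a configured *SSO client and
+//    // "token" is a *string access token issued by CreateToken.
+//    var accounts []*sso.AccountInfo
+//    err := svc.ListAccountsPages(&sso.ListAccountsInput{AccessToken: token},
+//        func(page *sso.ListAccountsOutput, lastPage bool) bool {
+//            accounts = append(accounts, page.AccountList...)
+//            return true // keep requesting pages until exhausted
+//        })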
+// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the client- and server-side session that is associated with the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. 
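+//
+//    // Editorial sketch, not part of the generated file: ending the session.
+//    // "svc" and the token value are assumptions.
+//    _, err := svc.Logout(&sso.LogoutInput{
+//        AccessToken: aws.String("<token from CreateToken>"),
+//    })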
+// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. +type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. 
+ // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. + RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. +func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The number of items that clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation +func (s ListAccountRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { + s.NextToken = &v + return s +} + +type ListAccountRolesOutput struct { + _ struct{} `type:"structure"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []*RoleInfo `locationName:"roleList" type:"list"` +} + +// String returns the string representation +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { + s.NextToken = &v + return s +} + +type ListAccountsOutput struct { + _ struct{} `type:"structure"` + + // A paginated response with the list of account information and the next token + // if more results are available. + AccountList []*AccountInfo `locationName:"accountList" type:"list"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAccountsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountsOutput) GoString() string { + return s.String() +} + +// SetAccountList sets the AccountList field's value. +func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { + s.AccountList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { + s.NextToken = &v + return s +} + +type LogoutInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. 
For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s LogoutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogoutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogoutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { + s.AccessToken = &v + return s +} + +type LogoutOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s LogoutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogoutOutput) GoString() string { + return s.String() +} + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about the role credentials that are assigned to the +// user. +type RoleCredentials struct { + _ struct{} `type:"structure"` + + // The identifier used for the temporary security credentials. For more information, + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + // The date on which temporary security credentials expire. 
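+	//
+	// (Editorial note, hedged: the generated model leaves the unit
+	// unspecified, but in practice this value appears to be milliseconds
+	// since the Unix epoch, recoverable as
+	// time.Unix(0, ms*int64(time.Millisecond)).)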
+ Expiration *int64 `locationName:"expiration" type:"long"` + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. + RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. 
+func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnauthorizedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnauthorizedException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { + return &UnauthorizedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedException) Code() string { + return "UnauthorizedException" +} + +// Message returns the exception's message. +func (s *UnauthorizedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedException) OrigErr() error { + return nil +} + +func (s *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go new file mode 100644 index 00000000000..92d82b2afb6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sso provides the client and types for making API +// requests to AWS Single Sign-On. +// +// AWS Single Sign-On Portal is a web service that makes it easy for you to +// assign user access to AWS SSO resources such as the user portal. Users can +// get AWS account applications and roles assigned to them and get federated +// into the application. +// +// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the AWS SSO User Guide. +// +// This API reference guide describes the AWS SSO Portal operations that you +// can call programatically and includes detailed information on data types +// and errors. +// +// AWS provides SDKs that consist of libraries and sample code for various programming +// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. 
The SDKs +// provide a convenient way to create programmatic access to AWS SSO and other +// AWS services. For more information about the AWS SDKs, including how to download +// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service. +// +// See sso package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/ +// +// Using the Client +// +// To contact AWS Single Sign-On with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Single Sign-On client SSO for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New +package sso diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go new file mode 100644 index 00000000000..77a6792e352 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sso + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that a problem occurred with the input to the request. For example, + // a required parameter might be missing or out of range. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The specified resource doesn't exist. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeTooManyRequestsException for service response error code + // "TooManyRequestsException". + // + // Indicates that the request is being made too frequently and is more than + // what the server can handle. + ErrCodeTooManyRequestsException = "TooManyRequestsException" + + // ErrCodeUnauthorizedException for service response error code + // "UnauthorizedException". + // + // Indicates that the request is not authorized. This can happen due to an invalid + // access token in the request. + ErrCodeUnauthorizedException = "UnauthorizedException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "InvalidRequestException": newErrorInvalidRequestException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "TooManyRequestsException": newErrorTooManyRequestsException, + "UnauthorizedException": newErrorUnauthorizedException, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go new file mode 100644 index 00000000000..35175331fc7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go @@ -0,0 +1,104 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type SSO struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO"        // Name of service.
+	EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+	ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create an SSO client from just a session.
+//     svc := sso.New(mySession)
+//
+//     // Create an SSO client with additional configuration
+//     svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "awsssoportal"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO {
+	svc := &SSO{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
+				Endpoint:      endpoint,
+				APIVersion:    "2019-06-10",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(
+		protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+	)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an SSO operation and runs any
+// custom request initialization.
+func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
new file mode 100644
index 00000000000..4cac247c188
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
@@ -0,0 +1,86 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ssoiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sso"
+)
+
+// SSOAPI provides an interface to enable mocking the
+// sso.SSO service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Single Sign-On.
+//    func myFunc(svc ssoiface.SSOAPI) bool {
+//        // Make svc.GetRoleCredentials request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sso.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSSOClient struct {
+//        ssoiface.SSOAPI
+//    }
+//    func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSSOClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
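+//
+// A further editorial sketch (an addition to the generated docs, reusing the
+// assumed mockSSOClient above): a stub for a paginated helper that feeds one
+// canned page to the code under test.
+//
+//    func (m *mockSSOClient) ListAccountsPages(in *sso.ListAccountsInput, fn func(*sso.ListAccountsOutput, bool) bool) error {
+//        fn(&sso.ListAccountsOutput{AccountList: []*sso.AccountInfo{{AccountId: aws.String("111111111111")}}}, true)
+//        return nil
+//    }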
+type SSOAPI interface { + GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput) + + ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error) + ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error) + ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput) + + ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error + ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error + + ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error) + ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error) + ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput) + + ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error + ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error + + Logout(*sso.LogoutInput) (*sso.LogoutOutput, error) + LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error) + LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput) +} + +var _ SSOAPI = (*sso.SSO)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 00000000000..bfc4372f9fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,3119 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. 
+// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// in the IAM User Guide. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. 
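+//
+//    // Editorial sketch, not part of the generated file: a minimal
+//    // AssumeRole call. "sess", the role ARN, the session name, and the
+//    // duration are placeholder assumptions.
+//    svc := sts.New(sess)
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        RoleSessionName: aws.String("example-session"),
+//        DurationSeconds: aws.Int64(900),
+//    })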
+// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. 
This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. 
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithSAML,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithSAMLInput{}
+	}
+
+	output = &AssumeRoleWithSAMLOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// Permissions
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies can't exceed 2,048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the persistent
+// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML assertion
+// as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see Passing Session Tags
+// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plain text session tag keys can’t
+// exceed 128 characters and the values can’t exceed 256 characters.
For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. 
The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider. Example providers
+// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
+// Connect-compatible identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
+// to uniquely identify a user. You can also supply the user with a consistent
+// identity throughout the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application. You also don't need to deploy
+// server-based proxy services that use long-term AWS credentials. Instead,
+// the identity of the caller is validated by using a token from the web identity
+// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service API operations.
+//
+// Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies can't exceed 2,048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see Passing Session Tags
+// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plain text session tag keys can’t
+// exceed 128 characters and the values can’t exceed 256 characters. For these
+// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+// in the IAM User Guide.
+//
+// An AWS conversion compresses the passed session policies and session tags
+// into a packed binary format that has a separate limit. Your request can fail
+// for this limit even if your plain text meets the other requirements. The
+// PackedPolicySize response element indicates by percentage how close the policies
+// and tags for your request are to the upper size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to
+// the role. When you do, the session tag overrides the role tag with the same
+// key.
+// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. +// +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. 
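+//
+// A minimal usage sketch (illustrative only; svc and the values below are
+// placeholders, and token stands in for an OIDC token string obtained from
+// your identity provider):
+//
+// out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//     RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-role"),
+//     RoleSessionName:  aws.String("example-session"),
+//     WebIdentityToken: aws.String(token),
+// })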
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An AWS conversion compresses the
+// session policy document, session policy ARNs, and session tags into a packed
+// binary format that has a separate limit. The error message indicates by percentage
+// how close the policies and tags are to the upper size limit. For more information,
+// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+// The request could not be fulfilled because the identity provider (IdP) that
+// was asked to verify the incoming identity token could not be reached. This
+// is often a transient error caused by network conditions. Retry the request
+// a limited number of times so that you don't exceed the request rate. If the
+// error persists, the identity provider might be down or not responding.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+	op := &request.Operation{
+		Name:       opDecodeAuthorizationMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DecodeAuthorizationMessageInput{}
+	}
+
+	output = &DecodeAuthorizationMessageOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some AWS operations additionally return
+// an encoded message that can provide details about this authorization failure.
+//
+// Only certain AWS operations return an encoded authorization message. The
+// documentation for an individual operation indicates whether that operation
+// returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following types of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// line breaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetAccessKeyInfoRequest method.
+// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
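+//
+// A minimal usage sketch (illustrative only; svc, ctx, and the key ID below
+// are placeholders):
+//
+// out, err := svc.GetAccessKeyInfoWithContext(ctx, &sts.GetAccessKeyInfoInput{
+//     AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+// })
+// // On success, out.Account identifies the AWS account that owns the key.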
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// adds a policy to your IAM user or role that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when an IAM user
+// or role is denied access. To view an example response, see I Am Not Authorized
+// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide.
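+//
+// A minimal usage sketch (illustrative only; svc and the values below are
+// placeholders, and the client would be configured with an IAM user's long-term
+// credentials):
+//
+// out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//     Name:            aws.String("example-federated-user"),
+//     DurationSeconds: aws.Int64(3600),
+//     Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"arn:aws:s3:::example-bucket"}]}`),
+// })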
+// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. 
For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
+// code that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. If you do not supply
+// a correct MFA code, then the API returns an access denied error. For a comparison
+// of GetSessionToken with the other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term AWS security
+// credentials of the AWS account root user or an IAM user. Credentials that
+// are created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
+// based on account credentials can range from 900 seconds (15 minutes) up to
+// 3,600 seconds (1 hour), with a default of 1 hour.
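+//
+// A minimal usage sketch (illustrative only; svc and the MFA values below
+// are placeholders):
+//
+// out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//     DurationSeconds: aws.Int64(3600),
+//     SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//     TokenCode:       aws.String("123456"),
+// })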
+// +// Permissions +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. 
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. 
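+	//
+	// As a purely illustrative sketch (the statement shown is an assumption,
+	// not part of the service contract), an inline session policy passed in
+	// this parameter might look like:
+	//
+	//    {
+	//      "Version": "2012-10-17",
+	//      "Statement": [
+	//        {"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}
+	//      ]
+	//    }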
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. 
+ // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. 
For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. 
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. 
For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. 
The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. 
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. 
+ SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. 
You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. 
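+	//
+	// For example (an illustrative value, not a real role):
+	// arn:aws:iam::123456789012:role/WebIdentityDemoRole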
+ // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. 
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. 
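+	//
+	// As a purely illustrative example (the value is hypothetical), an OpenID
+	// Connect provider might issue a sub claim such as "248289761001"; that
+	// string would then be returned in this field.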
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. 
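+	//
+	// As a hedged sketch of typical downstream use (the variable out is
+	// hypothetical), the returned values are often wired into a static
+	// credentials value from the aws/credentials package:
+	//
+	//    c := out.Credentials
+	//    v := credentials.NewStaticCredentials(
+	//        *c.AccessKeyId, *c.SecretAccessKey, *c.SessionToken)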
+ // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. 
+ // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. 
+type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. 
You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. 
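+	//
+	// A brief illustrative sketch (the variable input is hypothetical; the ARN
+	// is the AWS managed AmazonS3ReadOnlyAccess policy):
+	//
+	//    input.PolicyArns = []*sts.PolicyDescriptorType{
+	//        {Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
+	//    }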
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	PolicyArns []*PolicyDescriptorType `type:"list"`
+
+	// A list of session tags. Each session tag consists of a key name and an associated
+	// value. For more information about session tags, see Passing Session Tags
+	// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+	// in the IAM User Guide.
+	//
+	// This parameter is optional. You can pass up to 50 session tags. The plain
+	// text session tag keys can’t exceed 128 characters and the values can’t
+	// exceed 256 characters. For these and additional limits, see IAM and STS Character
+	// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+	// in the IAM User Guide.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// You can pass a session tag with the same key as a tag that is already attached
+	// to the user you are federating. When you do, session tags override a user
+	// tag with the same key.
+	//
+	// Tag key–value pairs are not case sensitive, but case is preserved. This
+	// means that you cannot have separate Department and department tag keys. Assume
+	// that the user that you are federating has the Department=Marketing tag and
+	// you pass the department=engineering session tag. Department and department
+	// are not saved as separate tags, and the session tag passed in the request
+	// takes precedence over the user tag.
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
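+//
+// As an editorial sketch (not generated code; the name, duration, and policy
+// ARN are hypothetical), the fluent setters below chain because each returns
+// the receiver, and the SDK runs Validate automatically before sending:
+//
+//	input := (&GetFederationTokenInput{}).
+//		SetName("Bob").
+//		SetDurationSeconds(3600).
+//		SetPolicyArns([]*PolicyDescriptorType{
+//			(&PolicyDescriptorType{}).SetArn("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"),
+//		})
+//	if err := input.Validate(); err != nil {
+//		// handle request.ErrInvalidParams
+//	}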
+func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. 
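+//
+// As an editorial sketch (not generated code; svc and input stand for values
+// built as shown earlier), callers normally read the temporary keys from the
+// returned Credentials rather than using the setters:
+//
+//	out, err := svc.GetFederationToken(input)
+//	if err == nil && out.Credentials != nil {
+//		fmt.Println(*out.Credentials.AccessKeyId)
+//	}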
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. 
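+//
+// As an editorial sketch (not generated code; the serial number and token
+// code are hypothetical), the setters chain, so an MFA-protected request can
+// be built as:
+//
+//	input := (&GetSessionTokenInput{}).
+//		SetDurationSeconds(3600).
+//		SetSerialNumber("arn:aws:iam::123456789012:mfa/user").
+//		SetTokenCode("123456")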
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. 
For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000000..cb1debbaa45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,32 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// AWS Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for AWS Identity and Access Management (IAM) users or for users +// that you authenticate (federated users). This guide provides descriptions +// of the STS API. For more information about using this service, see Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. 
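+// For example (an illustrative sketch; the region shown is hypothetical):
+//
+//	sess := session.Must(session.NewSession())
+//	svc := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))
+//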
+// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 00000000000..a233f542ef2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,82 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. 
An AWS conversion compresses the
	// session policy document, session policy ARNs, and session tags into a packed
	// binary format that has a separate limit. The error message indicates by percentage
	// how close the policies and tags are to the upper size limit. For more information,
	// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
	// in the IAM User Guide.
	//
	// You could receive this error even though you meet other defined session policy
	// and session tag limits. For more information, see IAM and STS Entity Character
	// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
	// in the IAM User Guide.
	ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"

	// ErrCodeRegionDisabledException for service response error code
	// "RegionDisabledException".
	//
	// STS is not activated in the requested region for the account that is being
	// asked to generate credentials. The account administrator must use the IAM
	// console to activate STS in that region. For more information, see Activating
	// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
	// in the IAM User Guide.
	ErrCodeRegionDisabledException = "RegionDisabledException"
)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000000..d34a6855331
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,98 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// modify or mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 00000000000..e2e1d6efe55
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 00000000000..339177be663
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 00000000000..1602287d7ce --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 
+11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 
+3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000000..d7d14f8eb63 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
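+//
+// A minimal usage sketch (the epsilon and inserted values are illustrative):
+//
+//	s := NewLowBiased(0.01)
+//	for _, v := range []float64{10, 20, 30, 40, 50} {
+//		s.Insert(v)
+//	}
+//	p50 := s.Query(0.5) // approximate median
+//	_ = p50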
+func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. 
+// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 00000000000..c516ea88da7 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... 
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 00000000000..24b53065f40 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 00000000000..2fd8693c21b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 00000000000..49f67608bf6 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 00000000000..db0b35fbe39 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
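+		// The bytes already buffered in d.mem plus a prefix of b complete
+		// exactly one 32-byte block; run the four hash lanes over it, then
+		// drop the consumed prefix from b.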
+ copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 00000000000..ad14b807f4d --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 00000000000..d580e32aed4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
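+	// Each iteration of blockLoop consumes one 32-byte block: the round
+	// macro reads 8 bytes for each of the four lanes and advances CX.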
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 00000000000..4a5a821603e --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
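+ //
+ // The structure below mirrors the reference xxHash64 layout: inputs of 32
+ // bytes or more are consumed as 32-byte stripes feeding four accumulators
+ // (v1-v4), which are then rotated and merged; any remaining tail is mixed
+ // in 8-, 4- and 1-byte steps before the final avalanche of shifts and
+ // multiplies.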
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 00000000000..fc9bea7a31f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 00000000000..53bf76efbc2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
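+//
+// Note: as in Sum64String above, the slice header constructed below aliases
+// the string's backing memory rather than copying it, so the temporary
+// []byte must never be modified.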
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/danwakefield/fnmatch/.gitignore b/vendor/github.com/danwakefield/fnmatch/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/danwakefield/fnmatch/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/danwakefield/fnmatch/LICENSE b/vendor/github.com/danwakefield/fnmatch/LICENSE new file mode 100644 index 00000000000..0dc9851a343 --- /dev/null +++ b/vendor/github.com/danwakefield/fnmatch/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2016, Daniel Wakefield +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/danwakefield/fnmatch/README.md b/vendor/github.com/danwakefield/fnmatch/README.md new file mode 100644 index 00000000000..b8d715662c2 --- /dev/null +++ b/vendor/github.com/danwakefield/fnmatch/README.md @@ -0,0 +1,4 @@ +# fnmatch +Updated clone of kballards golang fnmatch gist (https://gist.github.com/kballard/272720) + + diff --git a/vendor/github.com/danwakefield/fnmatch/fnmatch.go b/vendor/github.com/danwakefield/fnmatch/fnmatch.go new file mode 100644 index 00000000000..07ac7b37ca0 --- /dev/null +++ b/vendor/github.com/danwakefield/fnmatch/fnmatch.go @@ -0,0 +1,219 @@ +// Provide string-matching based on fnmatch.3 +package fnmatch + +// There are a few issues that I believe to be bugs, but this implementation is +// based as closely as possible on BSD fnmatch. These bugs are present in the +// source of BSD fnmatch, and so are replicated here. The issues are as follows: +// +// * FNM_PERIOD is no longer observed after the first * in a pattern +// This only applies to matches done with FNM_PATHNAME as well +// * FNM_PERIOD doesn't apply to ranges. 
According to the documentation, +// a period must be matched explicitly, but a range will match it too + +import ( + "unicode" + "unicode/utf8" +) + +const ( + FNM_NOESCAPE = (1 << iota) + FNM_PATHNAME + FNM_PERIOD + + FNM_LEADING_DIR + FNM_CASEFOLD + + FNM_IGNORECASE = FNM_CASEFOLD + FNM_FILE_NAME = FNM_PATHNAME +) + +func unpackRune(str *string) rune { + rune, size := utf8.DecodeRuneInString(*str) + *str = (*str)[size:] + return rune +} + +// Matches the pattern against the string, with the given flags, +// and returns true if the match is successful. +// This function should match fnmatch.3 as closely as possible. +func Match(pattern, s string, flags int) bool { + // The implementation for this function was patterned after the BSD fnmatch.c + // source found at http://src.gnu-darwin.org/src/contrib/csup/fnmatch.c.html + noescape := (flags&FNM_NOESCAPE != 0) + pathname := (flags&FNM_PATHNAME != 0) + period := (flags&FNM_PERIOD != 0) + leadingdir := (flags&FNM_LEADING_DIR != 0) + casefold := (flags&FNM_CASEFOLD != 0) + // the following is some bookkeeping that the original fnmatch.c implementation did not do + // We are forced to do this because we're not keeping indexes into C strings but rather + // processing utf8-encoded strings. Use a custom unpacker to maintain our state for us + sAtStart := true + sLastAtStart := true + sLastSlash := false + sLastUnpacked := rune(0) + unpackS := func() rune { + sLastSlash = (sLastUnpacked == '/') + sLastUnpacked = unpackRune(&s) + sLastAtStart = sAtStart + sAtStart = false + return sLastUnpacked + } + for len(pattern) > 0 { + c := unpackRune(&pattern) + switch c { + case '?': + if len(s) == 0 { + return false + } + sc := unpackS() + if pathname && sc == '/' { + return false + } + if period && sc == '.' && (sLastAtStart || (pathname && sLastSlash)) { + return false + } + case '*': + // collapse multiple *'s + // don't use unpackRune here, the only char we care to detect is ASCII + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + } + if period && s[0] == '.' 
&& (sAtStart || (pathname && sLastUnpacked == '/')) {
+ return false
+ }
+ // optimize for patterns with * at end or before /
+ if len(pattern) == 0 {
+ if pathname {
+ return leadingdir || (strchr(s, '/') == -1)
+ } else {
+ return true
+ }
+ } else if pathname && pattern[0] == '/' {
+ offset := strchr(s, '/')
+ if offset == -1 {
+ return false
+ } else {
+ // we already know our pattern and string have a /, skip past it
+ s = s[offset:] // use unpackS here to maintain our bookkeeping state
+ unpackS()
+ pattern = pattern[1:] // we know / is one byte long
+ break
+ }
+ }
+ // general case, recurse
+ for test := s; len(test) > 0; unpackRune(&test) {
+ // I believe the (flags &^ FNM_PERIOD) is a bug when FNM_PATHNAME is specified
+ // but this follows exactly from how fnmatch.c implements it
+ if Match(pattern, test, (flags &^ FNM_PERIOD)) {
+ return true
+ } else if pathname && test[0] == '/' {
+ break
+ }
+ }
+ return false
+ case '[':
+ if len(s) == 0 {
+ return false
+ }
+ if pathname && s[0] == '/' {
+ return false
+ }
+ sc := unpackS()
+ if !rangematch(&pattern, sc, flags) {
+ return false
+ }
+ case '\\':
+ if !noescape {
+ if len(pattern) > 0 {
+ c = unpackRune(&pattern)
+ }
+ }
+ fallthrough
+ default:
+ if len(s) == 0 {
+ return false
+ }
+ sc := unpackS()
+ switch {
+ case sc == c:
+ case casefold && unicode.ToLower(sc) == unicode.ToLower(c):
+ default:
+ return false
+ }
+ }
+ }
+ return len(s) == 0 || (leadingdir && s[0] == '/')
+}
+
+func rangematch(pattern *string, test rune, flags int) bool {
+ if len(*pattern) == 0 {
+ return false
+ }
+ casefold := (flags&FNM_CASEFOLD != 0)
+ noescape := (flags&FNM_NOESCAPE != 0)
+ if casefold {
+ test = unicode.ToLower(test)
+ }
+ var negate, matched bool
+ if (*pattern)[0] == '^' || (*pattern)[0] == '!'
{ + negate = true + (*pattern) = (*pattern)[1:] + } + for !matched && len(*pattern) > 1 && (*pattern)[0] != ']' { + c := unpackRune(pattern) + if !noescape && c == '\\' { + if len(*pattern) > 1 { + c = unpackRune(pattern) + } else { + return false + } + } + if casefold { + c = unicode.ToLower(c) + } + if (*pattern)[0] == '-' && len(*pattern) > 1 && (*pattern)[1] != ']' { + unpackRune(pattern) // skip the - + c2 := unpackRune(pattern) + if !noescape && c2 == '\\' { + if len(*pattern) > 0 { + c2 = unpackRune(pattern) + } else { + return false + } + } + if casefold { + c2 = unicode.ToLower(c2) + } + // this really should be more intelligent, but it looks like + // fnmatch.c does simple int comparisons, therefore we will as well + if c <= test && test <= c2 { + matched = true + } + } else if c == test { + matched = true + } + } + // skip past the rest of the pattern + ok := false + for !ok && len(*pattern) > 0 { + c := unpackRune(pattern) + if c == '\\' && len(*pattern) > 0 { + unpackRune(pattern) + } else if c == ']' { + ok = true + } + } + return ok && matched != negate +} + +// define strchr because strings.Index() seems a bit overkill +// returns the index of c in s, or -1 if there is no match +func strchr(s string, c rune) int { + for i, sc := range s { + if sc == c { + return i + } + } + return -1 +} diff --git a/vendor/github.com/denormal/go-gitignore/.gitignore b/vendor/github.com/denormal/go-gitignore/.gitignore new file mode 100644 index 00000000000..63e74e66b8c --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# ignore directories +bin/ +pkg/ +**/github.com/ + +# ignore edit files +.*~ +.*.sw? diff --git a/vendor/github.com/denormal/go-gitignore/LICENSE b/vendor/github.com/denormal/go-gitignore/LICENSE new file mode 100644 index 00000000000..7c7d093d93f --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Denormal Limited + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
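As a quick orientation before the next package, here is a minimal, hypothetical sketch of how this vendored fnmatch package is driven; the patterns and paths are invented for illustration, and the flags are the constants defined in fnmatch.go above:

```go
package main

import (
	"fmt"

	"github.com/danwakefield/fnmatch"
)

func main() {
	// With FNM_PATHNAME, '*' does not match across '/' (as in fnmatch(3)).
	fmt.Println(fnmatch.Match("src/*.go", "src/main.go", fnmatch.FNM_PATHNAME))     // true
	fmt.Println(fnmatch.Match("src/*.go", "src/sub/main.go", fnmatch.FNM_PATHNAME)) // false
	// FNM_CASEFOLD makes the comparison case-insensitive.
	fmt.Println(fnmatch.Match("*.MD", "README.md", fnmatch.FNM_CASEFOLD)) // true
}
```

The FNM_PATHNAME behaviour, where a wildcard stops at path separators, is what the go-gitignore package below relies on for gitignore-style matching.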
diff --git a/vendor/github.com/denormal/go-gitignore/README.md b/vendor/github.com/denormal/go-gitignore/README.md new file mode 100644 index 00000000000..b9acf36edf7 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/README.md @@ -0,0 +1,94 @@ +# go-gitignore + +Package `go-gitignore` provides an interface for parsing `.gitignore` files, +either individually, or within a repository, and +matching paths against the retrieved patterns. Path matching is done using +[fnmatch](https://github.com/danwakefield/fnmatch) as specified by +[git](https://git-scm.com/docs/gitignore), with +support for recursive matching via the `**` pattern. + +```go +import "github.com/denormal/go-gitignore" + +// match a file against a particular .gitignore +ignore, err := gitignore.NewFromFile("/my/.gitignore") +if err != nil { + panic(err) +} +match := ignore.Match("/my/file/to.check") +if match != nil { + if match.Ignore() { + return true + } +} + +// or match against a repository +// - here we match a directory path relative to the repository +ignore, err := gitignore.NewRepository( "/my/git/repository" ) +if err != nil { + panic(err) +} +match := ignore.Relative("src/examples", true) +if match != nil { + if match.Include() { + fmt.Printf( + "include src/examples/ because of pattern %q at %s", + match, match.Position(), + ) + } +} + +// if it's not important whether a path matches, but whether it is +// ignored or included... +if ignore.Ignore("src/test") { + fmt.Println("ignore src/test") +} else if ignore.Include("src/github.com") { + fmt.Println("include src/github.com") +} +``` + +For more information see `godoc github.com/denormal/go-gitignore`. + +## Patterns + +`go-gitignore` supports the same `.gitignore` pattern format and matching rules as defined by [git](https://git-scm.com/docs/gitignore): + +* A blank line matches no files, so it can serve as a separator for readability. + +* A line starting with `#` serves as a comment. Put a backslash `\` in front of the first hash for patterns that begin with a hash. + +* Trailing spaces are ignored unless they are quoted with backslash `\`. + +* An optional prefix `!` which negates the pattern; any matching file excluded by a previous pattern will become included again. It is not possible to re-include a file if a parent directory of that file is excluded. Git doesn’t list excluded directories for performance reasons, so any patterns on contained files have no effect, no matter where they are defined. Put a backslash `\` in front of the first `!` for patterns that begin with a literal `!`, for example, `\!important!.txt`. + +* If the pattern ends with a slash, it is removed for the purpose of the following description, but it would only find a match with a directory. In other words, `foo/` will match a directory foo and paths underneath it, but will not match a regular file or a symbolic link `foo` (this is consistent with the way how pathspec works in general in Git). + +* If the pattern does not contain a slash `/`, Git treats it as a shell glob pattern and checks for a match against the pathname relative to the location of the `.gitignore` file (relative to the toplevel of the work tree if not from a `.gitignore` file). + +* Otherwise, Git treats the pattern as a shell glob suitable for consumption by `fnmatch(3)` with the `FNM_PATHNAME` flag: wildcards in the pattern will not match a `/` in the pathname. For example, `Documentation/*.html` matches `Documentation/git.html` but not `Documentation/ppc/ppc.html` or `tools/perf/Documentation/perf.html`. 
+
+* A leading slash matches the beginning of the pathname. For example, `/*.c` matches `cat-file.c` but not `mozilla-sha1/sha1.c`.
+
+Two consecutive asterisks `**` in patterns matched against full pathname may have special meaning:
+
+* A leading `**` followed by a slash means match in all directories. For example, `**/foo` matches file or directory `foo` anywhere, the same as pattern `foo`. `**/foo/bar` matches file or directory `bar` anywhere that is directly under directory `foo`.
+
+* A trailing `/**` matches everything inside. For example, `abc/**` matches all files inside directory `abc`, relative to the location of the `.gitignore` file, with infinite depth.
+
+* A slash followed by two consecutive asterisks then a slash matches zero or more directories. For example, `a/**/b` matches `a/b`, `a/x/b`, `a/x/y/b` and so on.
+
+* Other consecutive asterisks are considered invalid.
+
+## Installation
+
+`go-gitignore` can be installed using the standard Go approach:
+
+```sh
+go get github.com/denormal/go-gitignore
+```
+
+## License
+
+Copyright (c) 2016 Denormal Limited
+
+[MIT License](LICENSE)
diff --git a/vendor/github.com/denormal/go-gitignore/cache.go b/vendor/github.com/denormal/go-gitignore/cache.go
new file mode 100644
index 00000000000..7d3b615dcf8
--- /dev/null
+++ b/vendor/github.com/denormal/go-gitignore/cache.go
@@ -0,0 +1,60 @@
+package gitignore
+
+import (
+ "sync"
+)
+
+// Cache is the interface for the GitIgnore cache
+type Cache interface {
+ // Set stores the GitIgnore instance ig against its path.
+ Set(path string, ig GitIgnore)
+
+ // Get attempts to retrieve a GitIgnore instance associated with the given
+ // path. If the path is not known, nil is returned.
+ Get(path string) GitIgnore
+}
+
+// cache is the default thread-safe cache implementation
+type cache struct {
+ _i map[string]GitIgnore
+ _lock sync.Mutex
+}
+
+// NewCache returns a Cache instance. This is a thread-safe, in-memory cache
+// for GitIgnore instances.
+func NewCache() Cache {
+ return &cache{}
+} // NewCache()
+
+// Set stores the GitIgnore instance ignore against its path.
+func (c *cache) Set(path string, ignore GitIgnore) {
+ if ignore == nil {
+ return
+ }
+
+ // ensure the map is defined
+ if c._i == nil {
+ c._i = make(map[string]GitIgnore)
+ }
+
+ // set the cache item
+ c._lock.Lock()
+ c._i[path] = ignore
+ c._lock.Unlock()
+} // Set()
+
+// Get attempts to retrieve a GitIgnore instance associated with the given
+// path. If the path is not known, nil is returned.
+func (c *cache) Get(path string) GitIgnore {
+ c._lock.Lock()
+ _ignore, _ok := c._i[path]
+ c._lock.Unlock()
+ if _ok {
+ return _ignore
+ } else {
+ return nil
+ }
+} // Get()
+
+// ensure cache supports the Cache interface
+var _ Cache = &cache{}
diff --git a/vendor/github.com/denormal/go-gitignore/doc.go b/vendor/github.com/denormal/go-gitignore/doc.go
new file mode 100644
index 00000000000..27f5a0f1e74
--- /dev/null
+++ b/vendor/github.com/denormal/go-gitignore/doc.go
@@ -0,0 +1,8 @@
+/*
+Package gitignore provides an interface for parsing .gitignore files,
+either individually, or within a repository, and
+matching paths against the retrieved patterns. Path matching is done using
+fnmatch as specified by git (see https://git-scm.com/docs/gitignore), with
+support for recursive matching via the "**" pattern.
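+
+Most callers construct a GitIgnore with New, NewFromFile or NewRepository and
+then test paths with its Match, Ignore or Include methods.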
+*/ +package gitignore diff --git a/vendor/github.com/denormal/go-gitignore/error.go b/vendor/github.com/denormal/go-gitignore/error.go new file mode 100644 index 00000000000..94798e91dad --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/error.go @@ -0,0 +1,30 @@ +package gitignore + +type Error interface { + error + + // Position returns the position of the error within the .gitignore file + // (if any) + Position() Position + + // Underlying returns the underlying error, permitting direct comparison + // against the wrapped error. + Underlying() error +} + +type err struct { + error + _position Position +} // err() + +// NewError returns a new Error instance for the given error e and position p. +func NewError(e error, p Position) Error { + return &err{error: e, _position: p} +} // NewError() + +func (e *err) Position() Position { return e._position } + +func (e *err) Underlying() error { return e.error } + +// ensure err satisfies the Error interface +var _ Error = &err{} diff --git a/vendor/github.com/denormal/go-gitignore/errors.go b/vendor/github.com/denormal/go-gitignore/errors.go new file mode 100644 index 00000000000..9330a414178 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/errors.go @@ -0,0 +1,11 @@ +package gitignore + +import ( + "errors" +) + +var ( + CarriageReturnError = errors.New("unexpected carriage return '\\r'") + InvalidPatternError = errors.New("invalid pattern") + InvalidDirectoryError = errors.New("invalid directory") +) diff --git a/vendor/github.com/denormal/go-gitignore/exclude.go b/vendor/github.com/denormal/go-gitignore/exclude.go new file mode 100644 index 00000000000..21c9253b086 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/exclude.go @@ -0,0 +1,40 @@ +package gitignore + +import ( + "os" + "path/filepath" +) + +// exclude attempts to return the GitIgnore instance for the +// $GIT_DIR/info/exclude from the working copy to which path belongs. +func exclude(path string) (GitIgnore, error) { + // attempt to locate GIT_DIR + _gitdir := os.Getenv("GIT_DIR") + if _gitdir == "" { + _gitdir = filepath.Join(path, ".git") + } + _info, _err := os.Stat(_gitdir) + if _err != nil { + if os.IsNotExist(_err) { + return nil, nil + } else { + return nil, _err + } + } else if !_info.IsDir() { + return nil, nil + } + + // is there an info/exclude file within this directory? + _file := filepath.Join(_gitdir, "info", "exclude") + _, _err = os.Stat(_file) + if _err != nil { + if os.IsNotExist(_err) { + return nil, nil + } else { + return nil, _err + } + } + + // attempt to load the exclude file + return NewFromFile(_file) +} // exclude() diff --git a/vendor/github.com/denormal/go-gitignore/gitignore.go b/vendor/github.com/denormal/go-gitignore/gitignore.go new file mode 100644 index 00000000000..eb286b0d911 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/gitignore.go @@ -0,0 +1,307 @@ +package gitignore + +import ( + "io" + "os" + "path/filepath" + "runtime" + "strings" +) + +// use an empty GitIgnore for cached lookups +var empty = &ignore{} + +// GitIgnore is the interface to .gitignore files and repositories. It defines +// methods for testing files for matching the .gitignore file, and then +// determining whether a file should be ignored or included. +type GitIgnore interface { + // Base returns the directory containing the .gitignore file. + Base() string + + // Match attempts to match the path against this GitIgnore, and will + // return its Match if successful. 
Match will invoke the GitIgnore error
+ // handler (if defined) if it is not possible to determine the absolute
+ // path of the given path, or if it's not possible to determine if the
+ // path represents a file or a directory. If an error occurs, Match
+ // returns nil and the error handler (if defined via New, NewWithErrors
+ // or NewWithCache) will be invoked.
+ Match(path string) Match
+
+ // Absolute attempts to match an absolute path against this GitIgnore. If
+ // the path is not located under the base directory of this GitIgnore, or
+ // is not matched by this GitIgnore, nil is returned.
+ Absolute(string, bool) Match
+
+ // Relative attempts to match a path relative to the GitIgnore base
+ // directory. isdir is used to indicate whether the path represents a file
+ // or a directory. If the path is not matched by the GitIgnore, nil is
+ // returned.
+ Relative(path string, isdir bool) Match
+
+ // Ignore returns true if the path is ignored by this GitIgnore. Paths
+ // that are not matched by this GitIgnore are not ignored. Internally,
+ // Ignore uses Match, and will return false if Match() returns nil for path.
+ Ignore(path string) bool
+
+ // Include returns true if the path is included by this GitIgnore. Paths
+ // that are not matched by this GitIgnore are always included. Internally,
+ // Include uses Match, and will return true if Match() returns nil for path.
+ Include(path string) bool
+}
+
+// ignore is the implementation of a .gitignore file.
+type ignore struct {
+ _base string
+ _pattern []Pattern
+ _errors func(Error) bool
+}
+
+// New creates a new GitIgnore instance from the patterns read from r,
+// representing a .gitignore file in the base directory. If errors is given, it
+// will be invoked for every error encountered when parsing the .gitignore
+// patterns. Parsing will terminate if errors is called and returns false,
+// otherwise, parsing will continue until end of file has been reached.
+func New(r io.Reader, base string, errors func(Error) bool) GitIgnore {
+ // do we have an error handler?
+ _errors := errors
+ if _errors == nil {
+ _errors = func(e Error) bool { return true }
+ }
+
+ // extract the patterns from the reader
+ _parser := NewParser(r, _errors)
+ _patterns := _parser.Parse()
+
+ return &ignore{_base: base, _pattern: _patterns, _errors: _errors}
+} // New()
+
+// NewFromFile creates a GitIgnore instance from the given file. An error
+// will be returned if file cannot be opened or its absolute path determined.
+func NewFromFile(file string) (GitIgnore, error) {
+ // define an error handler to catch any file access errors
+ // - record the first encountered error
+ var _error Error
+ _errors := func(e Error) bool {
+ if _error == nil {
+ _error = e
+ }
+ return true
+ }
+
+ // attempt to retrieve the GitIgnore represented by this file
+ _ignore := NewWithErrors(file, _errors)
+
+ // did we encounter an error?
+ // - if the error has a zero Position then it was encountered
+ // before parsing was attempted, so we return that error
+ if _error != nil {
+ if _error.Position().Zero() {
+ return nil, _error.Underlying()
+ }
+ }
+
+ // otherwise, we ignore the parser errors
+ return _ignore, nil
+} // NewFromFile()
+
+// NewWithErrors creates a GitIgnore instance from the given file.
+// If errors is given, it will be invoked for every error encountered when
+// parsing the .gitignore patterns. Parsing will terminate if errors is called
+// and returns false, otherwise, parsing will continue until end of file has
+// been reached.
NewWithErrors returns nil if the .gitignore could not be read.
+func NewWithErrors(file string, errors func(Error) bool) GitIgnore {
+ var _err error
+
+ // do we have an error handler?
+ _file := file
+ _errors := errors
+ if _errors == nil {
+ _errors = func(e Error) bool { return true }
+ } else {
+ // augment the error handler to include the .gitignore file name
+ // - we do this here since the parser and lexer interfaces are
+ // not aware of file names
+ _errors = func(e Error) bool {
+ // augment the position with the file name
+ _position := e.Position()
+ _position.File = _file
+
+ // create a new error with the updated Position
+ _error := NewError(e.Underlying(), _position)
+
+ // invoke the original error handler
+ return errors(_error)
+ }
+ }
+
+ // we need the absolute path for the GitIgnore base
+ _file, _err = filepath.Abs(file)
+ if _err != nil {
+ _errors(NewError(_err, Position{}))
+ return nil
+ }
+ _base := filepath.Dir(_file)
+
+ // attempt to open the ignore file to create the io.Reader
+ _fh, _err := os.Open(_file)
+ if _err != nil {
+ _errors(NewError(_err, Position{}))
+ return nil
+ }
+
+ // return the GitIgnore instance
+ return New(_fh, _base, _errors)
+} // NewWithErrors()
+
+// NewWithCache returns a GitIgnore instance (using NewWithErrors)
+// for the given file. If the file has been loaded before, its GitIgnore
+// instance will be returned from the cache rather than being reloaded. If
+// cache is not defined, NewWithCache will behave as NewWithErrors.
+//
+// If NewWithErrors returns nil, NewWithCache will store an empty
+// GitIgnore (i.e. no patterns) against the file to prevent repeated parse
+// attempts on subsequent requests for the same file. Subsequent calls to
+// NewWithCache for a file that could not be loaded due to an error will
+// return nil.
+//
+// If errors is given, it will be invoked for every error encountered when
+// parsing the .gitignore patterns. Parsing will terminate if errors is called
+// and returns false, otherwise, parsing will continue until end of file has
+// been reached.
+func NewWithCache(file string, cache Cache, errors func(Error) bool) GitIgnore {
+ // do we have an error handler?
+ _errors := errors
+ if _errors == nil {
+ _errors = func(e Error) bool { return true }
+ }
+
+ // use the file absolute path as its key into the cache
+ _abs, _err := filepath.Abs(file)
+ if _err != nil {
+ _errors(NewError(_err, Position{}))
+ return nil
+ }
+
+ var _ignore GitIgnore
+ if cache != nil {
+ _ignore = cache.Get(_abs)
+ }
+ if _ignore == nil {
+ _ignore = NewWithErrors(file, _errors)
+ if _ignore == nil {
+ // if the load failed, cache an empty GitIgnore to prevent
+ // further attempts to load this file
+ _ignore = empty
+ }
+ if cache != nil {
+ cache.Set(_abs, _ignore)
+ }
+ }
+
+ // return the ignore (if we have it)
+ if _ignore == empty {
+ return nil
+ } else {
+ return _ignore
+ }
+} // NewWithCache()
+
+// Base returns the directory containing the .gitignore file for this GitIgnore.
+func (i *ignore) Base() string {
+ return i._base
+} // Base()
+
+// Match attempts to match the path against this GitIgnore, and will
+// return its Match if successful. Match will invoke the GitIgnore error
+// handler (if defined) if it is not possible to determine the absolute
+// path of the given path, or if it's not possible to determine if the
+// path represents a file or a directory. If an error occurs, Match
+// returns nil and the error handler (if defined via New, NewWithErrors
+// or NewWithCache) will be invoked.
+func (i *ignore) Match(path string) Match { + // ensure we have the absolute path for the given file + _path, _err := filepath.Abs(path) + if _err != nil { + i._errors(NewError(_err, Position{})) + return nil + } + + // is the path a file or a directory? + _info, _err := os.Stat(_path) + if _err != nil { + i._errors(NewError(_err, Position{})) + return nil + } + _isdir := _info.IsDir() + + // attempt to match the absolute path + return i.Absolute(_path, _isdir) +} // Match() + +// Absolute attempts to match an absolute path against this GitIgnore. If +// the path is not located under the base directory of this GitIgnore, or +// is not matched by this GitIgnore, nil is returned. +func (i *ignore) Absolute(path string, isdir bool) Match { + // does the file share the same directory as this ignore file? + if !strings.HasPrefix(path, i._base) { + return nil + } + + // extract the relative path of this file + _prefix := len(i._base) + 1 + _rel := string(path[_prefix:]) + return i.Relative(_rel, isdir) +} // Absolute() + +// Relative attempts to match a path relative to the GitIgnore base +// directory. isdir is used to indicate whether the path represents a file +// or a directory. If the path is not matched by the GitIgnore, nil is +// returned. +func (i *ignore) Relative(path string, isdir bool) Match { + // if we are on Windows, then translate the path to Unix form + _rel := path + if runtime.GOOS == "windows" { + _rel = filepath.ToSlash(_rel) + } + + // iterate over the patterns for this ignore file + // - iterate in reverse, since later patterns overwrite earlier + for _i := len(i._pattern) - 1; _i >= 0; _i-- { + _pattern := i._pattern[_i] + if _pattern.Match(_rel, isdir) { + return _pattern + } + } + + // we don't match this file + return nil +} // Relative() + +// Ignore returns true if the path is ignored by this GitIgnore. Paths +// that are not matched by this GitIgnore are not ignored. Internally, +// Ignore uses Match, and will return false if Match() returns nil for path. +func (i *ignore) Ignore(path string) bool { + _match := i.Match(path) + if _match != nil { + return _match.Ignore() + } + + // we didn't match this path, so we don't ignore it + return false +} // Ignore() + +// Include returns true if the path is included by this GitIgnore. Paths +// that are not matched by this GitIgnore are always included. Internally, +// Include uses Match, and will return true if Match() returns nil for path. +func (i *ignore) Include(path string) bool { + _match := i.Match(path) + if _match != nil { + return _match.Include() + } + + // we didn't match this path, so we include it + return true +} // Include() + +// ensure Ignore satisfies the GitIgnore interface +var _ GitIgnore = &ignore{} diff --git a/vendor/github.com/denormal/go-gitignore/lexer.go b/vendor/github.com/denormal/go-gitignore/lexer.go new file mode 100644 index 00000000000..f3c05fafd89 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/lexer.go @@ -0,0 +1,474 @@ +package gitignore + +import ( + "bufio" + "io" +) + +// +// inspired by https://blog.gopheracademy.com/advent-2014/parsers-lexers/ +// + +// lexer is the implementation of the .gitignore lexical analyser +type lexer struct { + _r *bufio.Reader + _unread []rune + _offset int + _line int + _column int + _previous []int +} // lexer{} + +// Lexer is the interface to the lexical analyser for .gitignore files +type Lexer interface { + // Next returns the next Token from the Lexer reader. 
If an error is + // encountered, it will be returned as an Error instance, detailing the + // error and its position within the stream. + Next() (*Token, Error) + + // Position returns the current position of the Lexer. + Position() Position + + // String returns the string representation of the current position of the + // Lexer. + String() string +} + +// NewLexer returns a Lexer instance for the io.Reader r. +func NewLexer(r io.Reader) Lexer { + return &lexer{_r: bufio.NewReader(r), _line: 1, _column: 1} +} // NewLexer() + +// Next returns the next Token from the Lexer reader. If an error is +// encountered, it will be returned as an Error instance, detailing the error +// and its position within the stream. +func (l *lexer) Next() (*Token, Error) { + // are we at the beginning of the line? + _beginning := l.beginning() + + // read the next rune + _r, _err := l.read() + if _err != nil { + return nil, _err + } + + switch _r { + // end of file + case _EOF: + return l.token(EOF, nil, nil) + + // whitespace ' ', '\t' + case _SPACE: + fallthrough + case _TAB: + l.unread(_r) + _rtn, _err := l.whitespace() + return l.token(WHITESPACE, _rtn, _err) + + // end of line '\n' or '\r\n' + case _CR: + fallthrough + case _NEWLINE: + l.unread(_r) + _rtn, _err := l.eol() + return l.token(EOL, _rtn, _err) + + // separator '/' + case _SEPARATOR: + return l.token(SEPARATOR, []rune{_r}, nil) + + // '*' or any '**' + case _WILDCARD: + // is the wildcard followed by another wildcard? + // - does this represent the "any" token (i.e. "**") + _next, _err := l.peek() + if _err != nil { + return nil, _err + } else if _next == _WILDCARD { + // we know read() will succeed here since we used peek() above + l.read() + return l.token(ANY, []rune{_WILDCARD, _WILDCARD}, nil) + } + + // we have a single wildcard, so treat this as a pattern + l.unread(_r) + _rtn, _err := l.pattern() + return l.token(PATTERN, _rtn, _err) + + // comment '#' + case _COMMENT: + l.unread(_r) + + // if we are at the start of the line, then we treat this as a comment + if _beginning { + _rtn, _err := l.comment() + return l.token(COMMENT, _rtn, _err) + } + + // otherwise, we regard this as a pattern + _rtn, _err := l.pattern() + return l.token(PATTERN, _rtn, _err) + + // negation '!' + case _NEGATION: + if _beginning { + return l.token(NEGATION, []rune{_r}, nil) + } + fallthrough + + // pattern + default: + l.unread(_r) + _rtn, _err := l.pattern() + return l.token(PATTERN, _rtn, _err) + } +} // Next() + +// Position returns the current position of the Lexer. +func (l *lexer) Position() Position { + return Position{"", l._line, l._column, l._offset} +} // Position() + +// String returns the string representation of the current position of the +// Lexer. +func (l *lexer) String() string { + return l.Position().String() +} // String() + +// +// private methods +// + +// read the next rune from the stream. Return an Error if there is a problem +// reading from the stream. If the end of stream is reached, return the EOF +// Token. +func (l *lexer) read() (rune, Error) { + var _r rune + var _err error + + // do we have any unread runes to read? 
+ _length := len(l._unread)
+ if _length > 0 {
+ _r = l._unread[_length-1]
+ l._unread = l._unread[:_length-1]
+
+ // otherwise, attempt to read a new rune
+ } else {
+ _r, _, _err = l._r.ReadRune()
+ if _err == io.EOF {
+ return _EOF, nil
+ }
+ }
+
+ // increment the offset and column counts
+ l._offset++
+ l._column++
+
+ return _r, l.err(_err)
+} // read()
+
+// unread returns the given runes to the stream, making them eligible to be
+// read again. The runes are returned in the order given, so the last rune
+// specified will be the next rune read from the stream.
+func (l *lexer) unread(r ...rune) {
+ // ignore EOF runes
+ _r := make([]rune, 0)
+ for _, _rune := range r {
+ if _rune != _EOF {
+ _r = append(_r, _rune)
+ }
+ }
+
+ // initialise the unread rune list if necessary
+ if l._unread == nil {
+ l._unread = make([]rune, 0)
+ }
+ if len(_r) != 0 {
+ l._unread = append(l._unread, _r...)
+ }
+
+ // decrement the offset and column counts
+ // - we have to take care of column being 0
+ // - at present we can only unwind across a single line boundary
+ _length := len(_r)
+ for ; _length > 0; _length-- {
+ l._offset--
+ if l._column == 1 {
+ _length := len(l._previous)
+ if _length > 0 {
+ l._column = l._previous[_length-1]
+ l._previous = l._previous[:_length-1]
+ l._line--
+ }
+ } else {
+ l._column--
+ }
+ }
+} // unread()
+
+// peek returns the next rune in the stream without consuming it (i.e. it will
+// be returned by the next call to read or peek). peek will return an error if
+// there is a problem reading from the stream.
+func (l *lexer) peek() (rune, Error) {
+ // read the next rune
+ _r, _err := l.read()
+ if _err != nil {
+ return _r, _err
+ }
+
+ // unread & return the rune
+ l.unread(_r)
+ return _r, _err
+} // peek()
+
+// newline adjusts the positional counters when an end of line is reached
+func (l *lexer) newline() {
+ // adjust the counters for the new line
+ if l._previous == nil {
+ l._previous = make([]int, 0)
+ }
+ l._previous = append(l._previous, l._column)
+ l._column = 1
+ l._line++
+} // newline()
+
+// comment reads all runes until a newline or end of file is reached. An
+// error is returned if an error is encountered reading from the stream.
+func (l *lexer) comment() ([]rune, Error) {
+ _comment := make([]rune, 0)
+
+ // read until we reach end of line or end of file
+ // - as we are in a comment, we ignore escape characters
+ for {
+ _next, _err := l.read()
+ if _err != nil {
+ return _comment, _err
+ }
+
+ // read until we have end of line or end of file
+ switch _next {
+ case _CR:
+ fallthrough
+ case _NEWLINE:
+ fallthrough
+ case _EOF:
+ // return the read rune to the stream and stop
+ l.unread(_next)
+ return _comment, nil
+ }
+
+ // otherwise, add this rune to the comment
+ _comment = append(_comment, _next)
+ }
+} // comment()
+
+// escape attempts to read an escape sequence (e.g. '\ ') from the input
+// stream. An error will be returned if there is an error reading from the
+// stream. escape returns just the escape rune if the following rune is either
+// end of line or end of file (since .gitignore files do not support line
+// continuations).
+func (l *lexer) escape() ([]rune, Error) {
+ // attempt to process the escape sequence
+ _peek, _err := l.peek()
+ if _err != nil {
+ return nil, _err
+ }
+
+ // what is the next rune after the escape?
+ switch _peek {
+ // are we at the end of the line or file?
+ // - we return just the escape rune
+ case _CR:
+ fallthrough
+ case _NEWLINE:
+ fallthrough
+ case _EOF:
+ return []rune{_ESCAPE}, nil
+ }
+
+ // otherwise, return the escape and the next rune
+ // - we know read() will succeed here since we used peek() above
+ l.read()
+ return []rune{_ESCAPE, _peek}, nil
+} // escape()
+
+// eol returns all runes from the current position to the end of the line. An
+// error is returned if there is a problem reading from the stream, or if a
+// carriage return character '\r' is encountered that is not followed by a
+// newline '\n'.
+func (l *lexer) eol() ([]rune, Error) {
+ // read to the end of the line
+ // - we should only be called here when we encounter an end of line
+ // sequence
+ _line := make([]rune, 0, 1)
+
+ // loop until there's nothing more to do
+ for {
+ _next, _err := l.read()
+ if _err != nil {
+ return _line, _err
+ }
+
+ // read until we have a newline or we're at end of file
+ switch _next {
+ // end of file
+ case _EOF:
+ return _line, nil
+
+ // carriage return - we expect to see a newline next
+ case _CR:
+ _line = append(_line, _next)
+ _next, _err = l.read()
+ if _err != nil {
+ return _line, _err
+ } else if _next != _NEWLINE {
+ l.unread(_next)
+ return _line, l.err(CarriageReturnError)
+ }
+ fallthrough
+
+ // newline
+ case _NEWLINE:
+ _line = append(_line, _next)
+ return _line, nil
+ }
+ }
+} // eol()
+
+// whitespace returns all whitespace (i.e. ' ' and '\t') runes in a sequence,
+// or an error if there is a problem reading the next runes.
+func (l *lexer) whitespace() ([]rune, Error) {
+ // read until we hit the first non-whitespace rune
+ _ws := make([]rune, 0, 1)
+
+ // loop until there's nothing more to do
+ for {
+ _next, _err := l.read()
+ if _err != nil {
+ return _ws, _err
+ }
+
+ // what is this next rune?
+ switch _next {
+ // space or tab is consumed
+ case _SPACE:
+ fallthrough
+ case _TAB:
+ break
+
+ // non-whitespace rune
+ default:
+ // return the rune to the buffer and we're done
+ l.unread(_next)
+ return _ws, nil
+ }
+
+ // add this rune to the whitespace
+ _ws = append(_ws, _next)
+ }
+} // whitespace()
+
+// pattern returns all runes representing a file or path pattern, delimited
+// either by unescaped whitespace, a path separator '/' or end of file. An
+// error is returned if a problem is encountered reading from the stream.
+func (l *lexer) pattern() ([]rune, Error) {
+ // read until we hit the first whitespace/end of line/eof rune
+ _pattern := make([]rune, 0, 1)
+
+ // loop until there's nothing more to do
+ for {
+ _r, _err := l.read()
+ if _err != nil {
+ return _pattern, _err
+ }
+
+ // what is the next rune?
+ switch _r {
+ // whitespace, newline, end of file, separator
+ // - this is the end of the pattern
+ case _SPACE:
+ fallthrough
+ case _TAB:
+ fallthrough
+ case _CR:
+ fallthrough
+ case _NEWLINE:
+ fallthrough
+ case _SEPARATOR:
+ fallthrough
+ case _EOF:
+ // return what we have
+ l.unread(_r)
+ return _pattern, nil
+
+ // a wildcard is the end of the pattern if it is part of any '**'
+ case _WILDCARD:
+ _next, _err := l.peek()
+ if _err != nil {
+ return _pattern, _err
+ } else if _next == _WILDCARD {
+ l.unread(_r)
+ return _pattern, _err
+ } else {
+ _pattern = append(_pattern, _r)
+ }
+
+ // escape sequence - consume the next rune
+ case _ESCAPE:
+ _escape, _err := l.escape()
+ if _err != nil {
+ return _pattern, _err
+ }
+
+ // add the escape sequence as part of the pattern
+ _pattern = append(_pattern, _escape...)
+ + // any other character, we add to the pattern + default: + _pattern = append(_pattern, _r) + } + } +} // pattern() + +// token returns a Token instance of the given type_ represented by word runes. +func (l *lexer) token(type_ TokenType, word []rune, e Error) (*Token, Error) { + // if we have an error, then we return a BAD token + if e != nil { + type_ = BAD + } + + // extract the lexer position + // - the column is taken from the current column position + // minus the length of the consumed "word" + _word := len(word) + _column := l._column - _word + _offset := l._offset - _word + position := Position{"", l._line, _column, _offset} + + // if this is a newline token, we adjust the line & column counts + if type_ == EOL { + l.newline() + } + + // return the Token + return NewToken(type_, word, position), e +} // token() + +// err returns an Error encapsulating the error e and the current Lexer +// position. +func (l *lexer) err(e error) Error { + // do we have an error? + if e == nil { + return nil + } else { + return NewError(e, l.Position()) + } +} // err() + +// beginning returns true if the Lexer is at the start of a new line. +func (l *lexer) beginning() bool { + return l._column == 1 +} // beginning() + +// ensure the lexer conforms to the lexer interface +var _ Lexer = &lexer{} diff --git a/vendor/github.com/denormal/go-gitignore/match.go b/vendor/github.com/denormal/go-gitignore/match.go new file mode 100644 index 00000000000..0f39a129cd4 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/match.go @@ -0,0 +1,23 @@ +package gitignore + +// Match represents the interface of successful matches against a .gitignore +// pattern set. A Match can be queried to determine whether the matched path +// should be ignored or included (i.e. was the path matched by a negated +// pattern), and to extract the position of the pattern within the .gitignore, +// and a string representation of the pattern. +type Match interface { + // Ignore returns true if the match pattern describes files or paths that + // should be ignored. + Ignore() bool + + // Include returns true if the match pattern describes files or paths that + // should be included. + Include() bool + + // String returns a string representation of the matched pattern. + String() string + + // Position returns the position in the .gitignore file at which the + // matching pattern was defined. + Position() Position +} diff --git a/vendor/github.com/denormal/go-gitignore/parser.go b/vendor/github.com/denormal/go-gitignore/parser.go new file mode 100644 index 00000000000..d56017e1723 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/parser.go @@ -0,0 +1,444 @@ +package gitignore + +import ( + "io" +) + +// Parser is the interface for parsing .gitignore files and extracting the set +// of patterns specified in the .gitignore file. +type Parser interface { + // Parse returns all well-formed .gitignore Patterns contained within the + // parser stream. Parsing will terminate at the end of the stream, or if + // the parser error handler returns false. + Parse() []Pattern + + // Next returns the next well-formed .gitignore Pattern from the parser + // stream. If an error is encountered, and the error handler is either + // not defined, or returns true, Next will skip to the end of the current + // line and attempt to parse the next Pattern. If the error handler + // returns false, or the parser reaches the end of the stream, Next + // returns nil. 
+ Next() Pattern + + // Position returns the current position of the parser in the input stream. + Position() Position +} // Parser{} + +// parser is the implementation of the .gitignore parser +type parser struct { + _lexer Lexer + _undo []*Token + _error func(Error) bool +} // parser{} + +// NewParser returns a new Parser instance for the given stream r. +// If err is not nil, it will be called for every error encountered during +// parsing. Parsing will terminate at the end of the stream, or if err +// returns false. +func NewParser(r io.Reader, err func(Error) bool) Parser { + return &parser{_lexer: NewLexer(r), _error: err} +} // NewParser() + +// Parse returns all well-formed .gitignore Patterns contained within the +// parser stream. Parsing will terminate at the end of the stream, or if +// the parser error handler returns false. +func (p *parser) Parse() []Pattern { + // keep parsing until there's no more patterns + _patterns := make([]Pattern, 0) + for { + _pattern := p.Next() + if _pattern == nil { + return _patterns + } + _patterns = append(_patterns, _pattern) + } +} // Parse() + +// Next returns the next well-formed .gitignore Pattern from the parser stream. +// If an error is encountered, and the error handler is either not defined, or +// returns true, Next will skip to the end of the current line and attempt to +// parse the next Pattern. If the error handler returns false, or the parser +// reaches the end of the stream, Next returns nil. +func (p *parser) Next() Pattern { + // keep searching until we find the next pattern, or until we + // reach the end of the file + for { + _token, _err := p.next() + if _err != nil { + if !p.errors(_err) { + return nil + } + + // we got an error from the lexer, so skip the remainder + // of this line and try again from the next line + for _err != nil { + _err = p.skip() + if _err != nil { + if !p.errors(_err) { + return nil + } + } + } + continue + } + + switch _token.Type { + // we're at the end of the file + case EOF: + return nil + + // we have a blank line or comment + case EOL: + continue + case COMMENT: + continue + + // otherwise, attempt to build the next pattern + default: + _pattern, _err := p.build(_token) + if _err != nil { + if !p.errors(_err) { + return nil + } + + // we encountered an error parsing the retrieved tokens + // - skip to the end of the line + for _err != nil { + _err = p.skip() + if _err != nil { + if !p.errors(_err) { + return nil + } + } + } + + // skip to the next token + continue + } else if _pattern != nil { + return _pattern + } + } + } +} // Next() + +// Position returns the current position of the parser in the input stream. +func (p *parser) Position() Position { + // if we have any previously read tokens, then the token at + // the end of the "undo" list (most recently "undone") gives the + // position of the parser + _length := len(p._undo) + if _length != 0 { + return p._undo[_length-1].Position + } + + // otherwise, return the position of the lexer + return p._lexer.Position() +} // Position() + +// +// private methods +// + +// build attempts to build a well-formed .gitignore Pattern starting from the +// given Token t. An Error will be returned if the sequence of tokens returned +// by the Lexer does not represent a valid Pattern. 
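+// Roughly, the accepted token sequences form the grammar: an optional
+// NEGATION, followed by SEPARATOR, ANY, WHITESPACE and PATTERN tokens,
+// terminated by EOL or EOF.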
+func (p *parser) build(t *Token) (Pattern, Error) {
+ // attempt to create a valid pattern
+ switch t.Type {
+ // we have a negated pattern
+ case NEGATION:
+ return p.negation(t)
+
+ // attempt to build a path specification
+ default:
+ return p.path(t)
+ }
+} // build()
+
+// negation attempts to build a well-formed negated .gitignore Pattern starting
+// from the negation Token t. As with build, negation returns an Error if the
+// sequence of tokens returned by the Lexer does not represent a valid Pattern.
+func (p *parser) negation(t *Token) (Pattern, Error) {
+ // a negation appears before a path specification, so
+ // skip the negation token
+ _next, _err := p.next()
+ if _err != nil {
+ return nil, _err
+ }
+
+ // extract the sequence of tokens for this path
+ _tokens, _err := p.sequence(_next)
+ if _err != nil {
+ return nil, _err
+ }
+
+ // include the "negation" token at the front of the sequence
+ _tokens = append([]*Token{t}, _tokens...)
+
+ // return the Pattern instance
+ return NewPattern(_tokens), nil
+} // negation()
+
+// path attempts to build a well-formed .gitignore Pattern representing a path
+// specification, starting with the Token t. If the sequence of tokens returned
+// by the Lexer does not represent a valid Pattern, path returns an Error.
+// Trailing whitespace is dropped from the sequence of pattern tokens.
+func (p *parser) path(t *Token) (Pattern, Error) {
+ // extract the sequence of tokens for this path
+ _tokens, _err := p.sequence(t)
+ if _err != nil {
+ return nil, _err
+ }
+
+ // remove trailing whitespace tokens
+ _length := len(_tokens)
+ for _length > 0 {
+ // if we have a non-whitespace token, we can stop
+ _length--
+ if _tokens[_length].Type != WHITESPACE {
+ break
+ }
+
+ // otherwise, truncate the token list
+ _tokens = _tokens[:_length]
+ }
+
+ // return the Pattern instance
+ return NewPattern(_tokens), nil
+} // path()
+
+// sequence attempts to extract a well-formed Token sequence from the Lexer
+// representing a .gitignore Pattern. sequence returns an Error if the
+// retrieved sequence of tokens does not represent a valid Pattern.
+func (p *parser) sequence(t *Token) ([]*Token, Error) {
+ // extract the sequence of tokens for a valid path
+ // - this excludes the negation token, which is handled as
+ // a special case before sequence() is called
+ switch t.Type {
+ // the path starts with a separator
+ case SEPARATOR:
+ return p.separator(t)
+
+ // the path starts with the "any" pattern ("**")
+ case ANY:
+ return p.any(t)
+
+ // the path starts with whitespace, wildcard or a pattern
+ case WHITESPACE:
+ fallthrough
+ case PATTERN:
+ return p.pattern(t)
+ }
+
+ // otherwise, we have an invalid specification
+ p.undo(t)
+ return nil, p.err(InvalidPatternError)
+} // sequence()
+
+// separator attempts to retrieve a valid sequence of tokens that may appear
+// after the path separator '/' Token t. An Error is returned if the sequence of
+// tokens is not valid, or if there is an error extracting tokens from the
+// input stream.
+func (p *parser) separator(t *Token) ([]*Token, Error) {
+ // build a list of tokens that may appear after a separator
+ _tokens := []*Token{t}
+ _token, _err := p.next()
+ if _err != nil {
+ return _tokens, _err
+ }
+
+ // what tokens are we allowed to have follow a separator?
+ switch _token.Type {
+ // a separator can be followed by a pattern or
+ // an "any" pattern (i.e. "**")
"**") + case ANY: + _next, _err := p.any(_token) + return append(_tokens, _next...), _err + + case WHITESPACE: + fallthrough + case PATTERN: + _next, _err := p.pattern(_token) + return append(_tokens, _next...), _err + + // if we encounter end of line or file we are done + case EOL: + fallthrough + case EOF: + return _tokens, nil + + // a separator can be followed by another separator + // - it's not ideal, and not very useful, but it's interpreted + // as a single separator + // - we could clean it up here, but instead we pass + // everything down to the matching later on + case SEPARATOR: + _next, _err := p.separator(_token) + return append(_tokens, _next...), _err + } + + // any other token is invalid + p.undo(_token) + return _tokens, p.err(InvalidPatternError) +} // separator() + +// any attempts to retrieve a valid sequence of tokens that may appear +// after the any '**' Token t. An Error is returned if the sequence if +// tokens is not valid, or if there is an error extracting tokens from the +// input stream. +func (p *parser) any(t *Token) ([]*Token, Error) { + // build the list of tokens that may appear after "any" (i.e. "**") + _tokens := []*Token{t} + _token, _err := p.next() + if _err != nil { + return _tokens, _err + } + + // what tokens are we allowed to have follow an "any" symbol? + switch _token.Type { + // an "any" token may only be followed by a separator + case SEPARATOR: + _next, _err := p.separator(_token) + return append(_tokens, _next...), _err + + // whitespace is acceptable if it takes us to the end of the line + case WHITESPACE: + return _tokens, p.eol() + + // if we encounter end of line or file we are done + case EOL: + fallthrough + case EOF: + return _tokens, nil + } + + // any other token is invalid + p.undo(_token) + return _tokens, p.err(InvalidPatternError) +} // any() + +// pattern attempts to retrieve a valid sequence of tokens that may appear +// after the path pattern Token t. An Error is returned if the sequence if +// tokens is not valid, or if there is an error extracting tokens from the +// input stream. +func (p *parser) pattern(t *Token) ([]*Token, Error) { + // build the list of tokens that may appear after a pattern + _tokens := []*Token{t} + _token, _err := p.next() + if _err != nil { + return _tokens, _err + } + + // what tokens are we allowed to have follow a pattern? + var _next []*Token + switch _token.Type { + case SEPARATOR: + _next, _err = p.separator(_token) + return append(_tokens, _next...), _err + + case WHITESPACE: + fallthrough + case PATTERN: + _next, _err = p.pattern(_token) + return append(_tokens, _next...), _err + + // if we encounter end of line or file we are done + case EOL: + fallthrough + case EOF: + return _tokens, nil + } + + // any other token is invalid + p.undo(_token) + return _tokens, p.err(InvalidPatternError) +} // pattern() + +// eol attempts to consume the next Lexer token to read the end of line or end +// of file. If a EOL or EOF is not reached , eol will return an error. +func (p *parser) eol() Error { + // are we at the end of the line? + _token, _err := p.next() + if _err != nil { + return _err + } + + // have we encountered whitespace only? 
+ switch _token.Type { + // if we're at the end of the line or file, we're done + case EOL: + fallthrough + case EOF: + p.undo(_token) + return nil + } + + // otherwise, we have an invalid pattern + p.undo(_token) + return p.err(InvalidPatternError) +} // eol() + +// next returns the next token from the Lexer, or an error if there is a +// problem reading from the input stream. +func (p *parser) next() (*Token, Error) { + // do we have any previously read tokens? + _length := len(p._undo) + if _length > 0 { + _token := p._undo[_length-1] + p._undo = p._undo[:_length-1] + return _token, nil + } + + // otherwise, attempt to retrieve the next token from the lexer + return p._lexer.Next() +} // next() + +// skip reads Tokens from the input until the end of line or end of file is +// reached. If there is a problem reading tokens, an Error is returned. +func (p *parser) skip() Error { + // skip to the next end of line or end of file token + for { + _token, _err := p.next() + if _err != nil { + return _err + } + + // if we have an end of line or file token, then we can stop + switch _token.Type { + case EOL: + fallthrough + case EOF: + return nil + } + } +} // skip() + +// undo returns the given Token t to the parser input stream to be retrieved +// again on a subsequent call to next. +func (p *parser) undo(t *Token) { + // add this token to the list of previously read tokens + // - initialise the undo list if required + if p._undo == nil { + p._undo = make([]*Token, 0, 1) + } + p._undo = append(p._undo, t) +} // undo() + +// err returns an Error for the error e, capturing the current parser Position. +func (p *parser) err(e error) Error { + // convert the error to include the parser position + return NewError(e, p.Position()) +} // err() + +// errors returns the response from the parser error handler to the Error e. If +// no error handler has been configured for this parser, errors returns true. +func (p *parser) errors(e Error) bool { + // do we have an error handler? + if p._error == nil { + return true + } + + // pass the error through to the error handler + // - if this returns false, parsing will stop + return p._error(e) +} // errors() diff --git a/vendor/github.com/denormal/go-gitignore/pattern.go b/vendor/github.com/denormal/go-gitignore/pattern.go new file mode 100644 index 00000000000..6885073adb9 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/pattern.go @@ -0,0 +1,284 @@ +package gitignore + +import ( + "path/filepath" + "strings" + + "github.com/danwakefield/fnmatch" +) + +// Pattern represents per-line patterns within a .gitignore file +type Pattern interface { + Match + + // Match returns true if the given path matches the name pattern. If the + // pattern is meant for directories only, and the path is not a directory, + // Match will return false. The matching is performed by fnmatch(). It + // is assumed path is relative to the base path of the owning GitIgnore. + Match(string, bool) bool +} + +// pattern is the base implementation of a .gitignore pattern +type pattern struct { + _negated bool + _anchored bool + _directory bool + _string string + _fnmatch string + _position Position +} // pattern() + +// name represents patterns matching a file or path name (i.e. the last +// component of a path) +type name struct { + pattern +} // name{} + +// path represents a pattern that contains at least one path separator within +// the pattern (i.e. 
not at the start or end of the pattern) +type path struct { + pattern + _depth int +} // path{} + +// any represents a pattern that contains at least one "any" token "**" +// allowing for recursive matching. +type any struct { + pattern + _tokens []*Token +} // any{} + +// NewPattern returns a Pattern from the ordered slice of Tokens. The tokens are +// assumed to represent a well-formed .gitignore pattern. A Pattern may be +// negated, anchored to the start of the path (relative to the base directory +// of the containing .gitignore), or match directories only. +func NewPattern(tokens []*Token) Pattern { + // if we have no tokens there is no pattern + if len(tokens) == 0 { + return nil + } + + // extract the pattern position from the first token + _position := tokens[0].Position + _string := tokenset(tokens).String() + + // is this a negated pattern? + _negated := false + if tokens[0].Type == NEGATION { + _negated = true + tokens = tokens[1:] + } + + // is this pattern anchored to the start of the path? + _anchored := false + if tokens[0].Type == SEPARATOR { + _anchored = true + tokens = tokens[1:] + } + + // is this pattern for directories only? + _directory := false + _last := len(tokens) - 1 + if tokens[_last].Type == SEPARATOR { + _directory = true + tokens = tokens[:_last] + } + + // build the pattern expression + _fnmatch := tokenset(tokens).String() + _pattern := &pattern{ + _negated: _negated, + _anchored: _anchored, + _position: _position, + _directory: _directory, + _string: _string, + _fnmatch: _fnmatch, + } + return _pattern.compile(tokens) +} // NewPattern() + +// compile generates a specific Pattern (i.e. name, path or any) +// represented by the list of tokens. +func (p *pattern) compile(tokens []*Token) Pattern { + // what tokens do we have in this pattern? + // - ANY token means we can match to any depth + // - SEPARATOR means we have path rather than file matching + _separator := false + for _, _token := range tokens { + switch _token.Type { + case ANY: + return p.any(tokens) + case SEPARATOR: + _separator = true + } + } + + // should we perform path or name/file matching? + if _separator { + return p.path(tokens) + } else { + return p.name(tokens) + } +} // compile() + +// Ignore returns true if the pattern describes files or paths that should be +// ignored. +func (p *pattern) Ignore() bool { return !p._negated } + +// Include returns true if the pattern describes files or paths that should be +// included (i.e. not ignored) +func (p *pattern) Include() bool { return p._negated } + +// Position returns the position of the first token of this pattern. +func (p *pattern) Position() Position { return p._position } + +// String returns the string representation of the pattern. +func (p *pattern) String() string { return p._string } + +// +// name patterns +// - designed to match trailing file/directory names only +// + +// name returns a Pattern designed to match file or directory names, with no +// path elements. +func (p *pattern) name(tokens []*Token) Pattern { + return &name{*p} +} // name() + +// Match returns true if the given path matches the name pattern. If the +// pattern is meant for directories only, and the path is not a directory, +// Match will return false. The matching is performed by fnmatch(). It +// is assumed path is relative to the base path of the owning GitIgnore. +func (n *name) Match(path string, isdir bool) bool { + // are we expecting a directory?
+ if n._directory && !isdir { + return false + } + + // should we match the whole path, or just the last component? + if n._anchored { + return fnmatch.Match(n._fnmatch, path, 0) + } else { + _, _base := filepath.Split(path) + return fnmatch.Match(n._fnmatch, _base, 0) + } +} // Match() + +// +// path patterns +// - designed to match complete or partial paths (not just filenames) +// + +// path returns a Pattern designed to match paths that include at least one +// path separator '/' neither at the end nor the start of the pattern. +func (p *pattern) path(tokens []*Token) Pattern { + // how many directory components are we expecting? + _depth := 0 + for _, _token := range tokens { + if _token.Type == SEPARATOR { + _depth++ + } + } + + // return the pattern instance + return &path{pattern: *p, _depth: _depth} +} // path() + +// Match returns true if the given path matches the path pattern. If the +// pattern is meant for directories only, and the path is not a directory, +// Match will return false. The matching is performed by fnmatch() +// with flags set to FNM_PATHNAME. It is assumed path is relative to the +// base path of the owning GitIgnore. +func (p *path) Match(path string, isdir bool) bool { + // are we expecting a directory + if p._directory && !isdir { + return false + } + + if fnmatch.Match(p._fnmatch, path, fnmatch.FNM_PATHNAME) { + return true + } else if p._anchored { + return false + } + + // match against the trailing path elements + return fnmatch.Match(p._fnmatch, path, fnmatch.FNM_PATHNAME) +} // Match() + +// +// "any" patterns +// + +// any returns a Pattern designed to match paths that include at least one +// any pattern '**', specifying recursive matching. +func (p *pattern) any(tokens []*Token) Pattern { + // consider only the non-SEPARATOR tokens, as these will be matched + // against the path components + _tokens := make([]*Token, 0) + for _, _token := range tokens { + if _token.Type != SEPARATOR { + _tokens = append(_tokens, _token) + } + } + + return &any{*p, _tokens} +} // any() + +// Match returns true if the given path matches the any pattern. If the +// pattern is meant for directories only, and the path is not a directory, +// Match will return false. The matching is performed by recursively applying +// fnmatch() with flags set to FNM_PATHNAME. It is assumed path is relative to +// the base path of the owning GitIgnore. +func (a *any) Match(path string, isdir bool) bool { + // are we expecting a directory? + if a._directory && !isdir { + return false + } + + // split the path into components + _parts := strings.Split(path, string(_SEPARATOR)) + + // attempt to match the parts against the pattern tokens + return a.match(_parts, a._tokens) +} // Match() + +// match performs the recursive matching for 'any' patterns. An 'any' +// token '**' may match any path component, or no path component. +func (a *any) match(path []string, tokens []*Token) bool { + // if we have no more tokens, then we have matched this path + // if there are also no more path elements, otherwise there's no match + if len(tokens) == 0 { + return len(path) == 0 + } + + // what token are we trying to match? 
+ _token := tokens[0] + switch _token.Type { + case ANY: + if len(path) == 0 { + return a.match(path, tokens[1:]) + } else { + return a.match(path, tokens[1:]) || a.match(path[1:], tokens) + } + + default: + // if we have a non-ANY token, then we must have a non-empty path + if len(path) != 0 { + // if the current path element matches this token, + // we match if the remainder of the path matches the + // remaining tokens + if fnmatch.Match(_token.Token(), path[0], fnmatch.FNM_PATHNAME) { + return a.match(path[1:], tokens[1:]) + } + } + } + + // if we are here, then we have no match + return false +} // match() + +// ensure the patterns confirm to the Pattern interface +var _ Pattern = &name{} +var _ Pattern = &path{} +var _ Pattern = &any{} diff --git a/vendor/github.com/denormal/go-gitignore/position.go b/vendor/github.com/denormal/go-gitignore/position.go new file mode 100644 index 00000000000..c80c5f5fad6 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/position.go @@ -0,0 +1,35 @@ +package gitignore + +import ( + "fmt" +) + +// Position represents the position of the .gitignore parser, and the position +// of a .gitignore pattern within the parsed stream. +type Position struct { + File string + Line int + Column int + Offset int +} + +// String returns a string representation of the current position. +func (p Position) String() string { + _prefix := "" + if p.File != "" { + _prefix = p.File + ": " + } + + if p.Line == 0 { + return fmt.Sprintf("%s+%d", _prefix, p.Offset) + } else if p.Column == 0 { + return fmt.Sprintf("%s%d", _prefix, p.Line) + } else { + return fmt.Sprintf("%s%d:%d", _prefix, p.Line, p.Column) + } +} // String() + +// Zero returns true if the Position represents the zero Position +func (p Position) Zero() bool { + return p.Line+p.Column+p.Offset == 0 +} // Zero() diff --git a/vendor/github.com/denormal/go-gitignore/repository.go b/vendor/github.com/denormal/go-gitignore/repository.go new file mode 100644 index 00000000000..10d0bebc466 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/repository.go @@ -0,0 +1,274 @@ +package gitignore + +import ( + "os" + "path/filepath" + "strings" +) + +const File = ".gitignore" + +// repository is the implementation of the set of .gitignore files within a +// repository hierarchy +type repository struct { + ignore + _errors func(e Error) bool + _cache Cache + _file string + _exclude GitIgnore +} // repository{} + +// NewRepository returns a GitIgnore instance representing a git repository +// with root directory base. If base is not a directory, or base cannot be +// read, NewRepository will return an error. +// +// Internally, NewRepository uses NewRepositoryWithFile. +func NewRepository(base string) (GitIgnore, error) { + return NewRepositoryWithFile(base, File) +} // NewRepository() + +// NewRepositoryWithFile returns a GitIgnore instance representing a git +// repository with root directory base. The repository will use file as +// the name of the files within the repository from which to load the +// .gitignore patterns. If file is the empty string, NewRepositoryWithFile +// uses ".gitignore". If the ignore file name is ".gitignore", the returned +// GitIgnore instance will also consider patterns listed in +// $GIT_DIR/info/exclude when performing repository matching. +// +// Internally, NewRepositoryWithFile uses NewRepositoryWithErrors. 
+func NewRepositoryWithFile(base, file string) (GitIgnore, error) { + // define an error handler to catch any file access errors + // - record the first encountered error + var _error Error + _errors := func(e Error) bool { + if _error == nil { + _error = e + } + return true + } + + // attempt to retrieve the repository represented by this file + _repository := NewRepositoryWithErrors(base, file, _errors) + + // did we encounter an error? + // - if the error has a zero Position then it was encountered + // before parsing was attempted, so we return that error + if _error != nil { + if _error.Position().Zero() { + return nil, _error.Underlying() + } + } + + // otherwise, we ignore the parser errors + return _repository, nil +} // NewRepositoryWithFile() + +// NewRepositoryWithErrors returns a GitIgnore instance representing a git +// repository with a root directory base. As with NewRepositoryWithFile, file +// specifies the name of the files within the repository containing the +// .gitignore patterns, and defaults to ".gitignore" if file is not specified. +// If the ignore file name is ".gitignore", the returned GitIgnore instance +// will also consider patterns listed in $GIT_DIR/info/exclude when performing +// repository matching. +// +// If errors is given, it will be invoked for each error encountered while +// matching a path against the repository GitIgnore (such as file permission +// denied, or errors during .gitignore parsing). See Match below. +// +// Internally, NewRepositoryWithErrors uses NewRepositoryWithCache. +func NewRepositoryWithErrors(base, file string, errors func(e Error) bool) GitIgnore { + return NewRepositoryWithCache(base, file, NewCache(), errors) +} // NewRepositoryWithErrors() + +// NewRepositoryWithCache returns a GitIgnore instance representing a git +// repository with a root directory base. As with NewRepositoryWithErrors, +// file specifies the name of the files within the repository containing the +// .gitignore patterns, and defaults to ".gitignore" if file is not specified. +// If the ignore file name is ".gitignore", the returned GitIgnore instance +// will also consider patterns listed in $GIT_DIR/info/exclude when performing +// repository matching. +// +// NewRepositoryWithCache will attempt to load each .gitignore within the +// repository only once, using NewWithCache to store the corresponding +// GitIgnore instance in cache. If cache is given as nil, +// NewRepositoryWithCache will create a Cache instance for this repository. +// +// If errors is given, it will be invoked for each error encountered while +// matching a path against the repository GitIgnore (such as file permission +// denied, or errors during .gitignore parsing). See Match below. +func NewRepositoryWithCache(base, file string, cache Cache, errors func(e Error) bool) GitIgnore { + // do we have an error handler? + _errors := errors + if _errors == nil { + _errors = func(e Error) bool { return true } + } + + // extract the absolute path of the base directory + _base, _err := filepath.Abs(base) + if _err != nil { + _errors(NewError(_err, Position{})) + return nil + } + + // ensure the given base is a directory + _info, _err := os.Stat(_base) + if _info != nil { + if !_info.IsDir() { + _err = InvalidDirectoryError + } + } + if _err != nil { + _errors(NewError(_err, Position{})) + return nil + } + + // if we haven't been given a base file name, use the default + if file == "" { + file = File + } + + // are we matching .gitignore files? 
+ // - if we are, we also consider $GIT_DIR/info/exclude + var _exclude GitIgnore + if file == File { + _exclude, _err = exclude(_base) + if _err != nil { + _errors(NewError(_err, Position{})) + return nil + } + } + + // create the repository instance + _ignore := ignore{_base: _base} + _repository := &repository{ + ignore: _ignore, + _errors: _errors, + _exclude: _exclude, + _cache: cache, + _file: file, + } + + return _repository +} // NewRepositoryWithCache() + +// Match attempts to match the path against this repository. Matching proceeds +// according to normal gitignore rules, where .gitignore files in the same +// directory as path take precedence over .gitignore files higher up the +// path hierarchy, and child files and directories are ignored if the parent +// is ignored. If the path is matched by a gitignore pattern in the repository, +// a Match is returned detailing the matched pattern. The returned Match +// can be used to determine if the path should be ignored or included according +// to the repository. +// +// If an error is encountered during matching, the repository error handler +// (if configured via NewRepositoryWithErrors or NewRepositoryWithCache) will +// be called. If the error handler returns false, matching will terminate and +// Match will return nil. If the handler returns true, Match will continue +// processing in an attempt to match path. +// +// Match will report an error and return nil if the absolute path cannot be +// determined, or if it's not possible to determine if path represents a file +// or a directory. +// +// If path is not located under the root of this repository, Match returns nil. +func (r *repository) Match(path string) Match { + // ensure we have the absolute path for the given file + _path, _err := filepath.Abs(path) + if _err != nil { + r._errors(NewError(_err, Position{})) + return nil + } + + // is the path a file or a directory? + _info, _err := os.Stat(_path) + if _err != nil { + r._errors(NewError(_err, Position{})) + return nil + } + _isdir := _info.IsDir() + + // attempt to match the absolute path + return r.Absolute(_path, _isdir) +} // Match() + +// Absolute attempts to match an absolute path against this repository. If the +// path is not located under the base directory of this repository, or is not +// matched by this repository, nil is returned. +func (r *repository) Absolute(path string, isdir bool) Match { + // does the file share the same directory as this ignore file? + if !strings.HasPrefix(path, r.Base()) { + return nil + } + + // extract the relative path of this file + _prefix := len(r.Base()) + 1 + _rel := string(path[_prefix:]) + return r.Relative(_rel, isdir) +} // Absolute() + +// Relative attempts to match a path relative to the repository base directory. +// If the path is not matched by the repository, nil is returned. +func (r *repository) Relative(path string, isdir bool) Match { + // if there's no path, then there's nothing to match + _path := filepath.Clean(path) + if _path == "." { + return nil + } + + // repository matching: + // - a child path cannot be considered if its parent is ignored + // - a .gitignore in a lower directory overrides a .gitignore in a + // higher directory + + // first, is the parent directory ignored?
+ // - extract the parent directory from the current path + _parent, _local := filepath.Split(_path) + _match := r.Relative(_parent, true) + if _match != nil { + if _match.Ignore() { + return _match + } + } + _parent = filepath.Clean(_parent) + + // the parent directory isn't ignored, so we now look at the original path + // - we consider .gitignore files in the current directory first, then + // move up the path hierarchy + var _last string + for { + _file := filepath.Join(r._base, _parent, r._file) + _ignore := NewWithCache(_file, r._cache, r._errors) + if _ignore != nil { + _match := _ignore.Relative(_local, isdir) + if _match != nil { + return _match + } + } + + // if there's no parent, then we're done + // - since we use filepath.Clean() we look for "." + if _parent == "." { + break + } + + // we don't have a match for this file, so we progress up the + // path hierarchy + // - we are manually building _local using the .gitignore + // separator "/", which is how we handle operating system + // file system differences + _parent, _last = filepath.Split(_parent) + _parent = filepath.Clean(_parent) + _local = _last + string(_SEPARATOR) + _local + } + + // do we have a global exclude file? (i.e. GIT_DIR/info/exclude) + if r._exclude != nil { + return r._exclude.Relative(path, isdir) + } + + // we have no match + return nil +} // Relative() + +// ensure repository satisfies the GitIgnore interface +var _ GitIgnore = &repository{} diff --git a/vendor/github.com/denormal/go-gitignore/rune.go b/vendor/github.com/denormal/go-gitignore/rune.go new file mode 100644 index 00000000000..890c9b10f99 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/rune.go @@ -0,0 +1,15 @@ +package gitignore + +const ( + // define the sentinel runes of the lexer + _EOF = rune(0) + _CR = rune('\r') + _NEWLINE = rune('\n') + _COMMENT = rune('#') + _SEPARATOR = rune('/') + _ESCAPE = rune('\\') + _SPACE = rune(' ') + _TAB = rune('\t') + _NEGATION = rune('!') + _WILDCARD = rune('*') +) diff --git a/vendor/github.com/denormal/go-gitignore/token.go b/vendor/github.com/denormal/go-gitignore/token.go new file mode 100644 index 00000000000..4cbe3675e17 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/token.go @@ -0,0 +1,43 @@ +package gitignore + +import ( + "fmt" +) + +// Token represents a parsed token from a .gitignore stream, encapsulating the +// token type, the runes comprising the token, and the position within the +// stream of the first rune of the token. +type Token struct { + Type TokenType + Word []rune + Position +} + +// NewToken returns a Token instance of the given type t, represented by the +// word runes, at the stream position pos. If the token type is not known, the +// returned instance will have type BAD. +func NewToken(t TokenType, word []rune, pos Position) *Token { + // ensure the type is valid + if t < ILLEGAL || t > BAD { + t = BAD + } + + // return the token + return &Token{Type: t, Word: word, Position: pos} +} // NewToken() + +// Name returns a string representation of the Token type. +func (t *Token) Name() string { + return t.Type.String() +} // Name() + +// Token returns the string representation of the Token word. +func (t *Token) Token() string { + return string(t.Word) +} // Token() + +// String returns a string representation of the Token, encapsulating its +// position in the input stream, its name (i.e. type), and its runes.
+func (t *Token) String() string { + return fmt.Sprintf("%s: %s %q", t.Position.String(), t.Name(), t.Token()) +} // String() diff --git a/vendor/github.com/denormal/go-gitignore/tokenset.go b/vendor/github.com/denormal/go-gitignore/tokenset.go new file mode 100644 index 00000000000..136308865b8 --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/tokenset.go @@ -0,0 +1,15 @@ +package gitignore + +// tokenset represents an ordered list of Tokens +type tokenset []*Token + +// String() returns a concatenated string of all runes represented by the +// list of tokens. +func (t tokenset) String() string { + // concatenate the tokens into a single string + _rtn := "" + for _, _t := range []*Token(t) { + _rtn = _rtn + _t.Token() + } + return _rtn +} // String() diff --git a/vendor/github.com/denormal/go-gitignore/tokentype.go b/vendor/github.com/denormal/go-gitignore/tokentype.go new file mode 100644 index 00000000000..7d397baf29d --- /dev/null +++ b/vendor/github.com/denormal/go-gitignore/tokentype.go @@ -0,0 +1,42 @@ +package gitignore + +type TokenType int + +const ( + ILLEGAL TokenType = iota + EOF + EOL + WHITESPACE + COMMENT + SEPARATOR + NEGATION + PATTERN + ANY + BAD +) + +// String returns a string representation of the Token type. +func (t TokenType) String() string { + switch t { + case ILLEGAL: + return "ILLEGAL" + case EOF: + return "EOF" + case EOL: + return "EOL" + case WHITESPACE: + return "WHITESPACE" + case COMMENT: + return "COMMENT" + case SEPARATOR: + return "SEPARATOR" + case NEGATION: + return "NEGATION" + case PATTERN: + return "PATTERN" + case ANY: + return "ANY" + default: + return "BAD TOKEN" + } +} // String() diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/.gitignore b/vendor/github.com/dgrijalva/jwt-go/v4/.gitignore new file mode 100644 index 00000000000..80bed650ec0 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/v4/.travis.yml new file mode 100644 index 00000000000..ade3c583196 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/.travis.yml @@ -0,0 +1,11 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - "1.11" + - "1.12" + - "1.13" + - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/LICENSE b/vendor/github.com/dgrijalva/jwt-go/v4/LICENSE new file mode 100644 index 00000000000..df83a9c2f01 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/v4/MIGRATION_GUIDE.md new file mode 100644 index 00000000000..343afd8144f --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/MIGRATION_GUIDE.md @@ -0,0 +1,101 @@ +## Migration Guide from v3 -> v4 + +TODO: write this + +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
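To illustrate how small that interface is, here is a minimal sketch of a custom `Extractor` (a hedged example, not part of the library: it assumes the v3 `request.Extractor` interface shape of a single `ExtractToken(*http.Request) (string, error)` method, and the cookie name is a placeholder):

```go
package example

import (
	"errors"
	"net/http"
)

// cookieExtractor is a hypothetical Extractor that pulls the token string
// out of a named cookie rather than a header or form argument.
type cookieExtractor string

// ExtractToken returns the raw token string from the request, or an error
// if no token is present.
func (c cookieExtractor) ExtractToken(req *http.Request) (string, error) {
	cookie, err := req.Cookie(string(c))
	if err != nil || cookie.Value == "" {
		return "", errors.New("no token present in request")
	}
	return cookie.Value, nil
}
```

A type like this could then be composed with the provided extractors listed below, for example via `MultiExtractor`.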
+ +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/README.md b/vendor/github.com/dgrijalva/jwt-go/v4/README.md new file mode 100644 index 00000000000..230589bf69a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/README.md @@ -0,0 +1,101 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION:** Version 4 of this library is now available. This is the first non-backward-compatible version in a long time. There are a few changes that all users will notice, such as the new types introduced in members of `StandardClaims`. More changes are additive or only impact more advanced use. See VERSION_HISTORY.md for a list of changes as well as **TODO** MIGRATION_GUIDE.md for help updating your code.
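As a quick orientation to the v4 API vendored here, the following is a minimal, hedged sketch of parsing a token into the `StandardClaims` type defined in claims.go (it assumes v4 keeps the `ParseWithClaims` call shape described in the migration guide above; the secret and token string are placeholders):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go/v4"
)

func main() {
	secret := []byte("placeholder-secret")
	tokenString := "placeholder.token.string" // a signed token obtained elsewhere

	// The key function confirms the signing method before returning the key,
	// per the security notice below.
	token, err := jwt.ParseWithClaims(tokenString, &jwt.StandardClaims{},
		func(t *jwt.Token) (interface{}, error) {
			if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
			}
			return secret, nil
		})
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	claims := token.Claims.(*jwt.StandardClaims)
	fmt.Println("issuer:", claims.Issuer, "expires:", claims.ExpiresAt)
}
```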
+ +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which algorithm was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +- [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +- [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +- [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +- In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`.
You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +As of version 4, this project is compatible with go modules. You should use that to ensure you have no unpleasant surprises when updating. + +**BREAKING CHANGES:** + +- Version 4.0.0 includes _a lot_ of changes from the 3.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +- The author of the token was in possession of the signing secret +- The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +- The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation +- The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +- The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +- OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to.
For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +- OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +- Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/v4/VERSION_HISTORY.md new file mode 100644 index 00000000000..fffaa37f877 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/VERSION_HISTORY.md @@ -0,0 +1,141 @@ +## `jwt-go` Version History + +#### 4.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Errors have been updated significantly to take advantage of the changes to errors in go1.13/go2/xerrors + * The previous 'enum' describing error types has been replaced with unique types for every kind of error + * The new error types carry more information and interoperate properly with errors.As + * Dropping (official) support for go1.10 or older. Older versions dating back to 1.4 **may** continue to work, but are not being considered or tested against. 1.3 and previous will no longer work with this library. + * Behavior of time values has changed significantly, primarily to handle nil values, but also to be more consistent with Go: + * All time values used by the library are expressed using time.Time and time.Duration. This includes automatic parsing and encoding of the JWT unix timestamp format. + * `StandardClaims` time values use the new `Time` type, which wraps time.Time to handle nil values gracefully + * The method for describing custom parsing/validating behaviors has changed. The properties exposed on Parser have been replaced with `ParserOption`s. See https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis for the theory. See `ParserOption` and `SigningOption` for more details + * Per the spec, if the `aud` claim is present, it will be automatically validated. See `WithAudience` and `WithoutAudienceValidation` + * The `Valid` method on `Claims` now takes an argument, `ValidationHelper`. +* Added support for Leeway. If you have issues with clock skew between the issuing machine and the consuming machine, you can allow for a grace period. See `WithLeeway` +* Added support for custom JSON encoder/decoders. See `WithMarshaller` and `WithUnmarshaller` +* Added support for issuer validation. See `WithIssuer` +* Updated error messages and comments to comply with linter recommendations +* Added `KnownKeyfunc` for when you know both the signing method and the key without needing to look at the token Header. This should dramatically simplify many common use cases.
+* Added `ValidationHelper` to make it easier to implement custom Claims types without having to rewrite a bunch of built-in behavior, such as time comparison and leeway. `ValidationHelper` is built with `ParserOptions` and provides the same methods used by built in claims types to handle validation. +* Added support for `crypto.Signer` on several signing methods. This was a common request. +* Added new type, `ClaimStrings`, which will correctly handle properties such as `aud` that can be either an array of strings or a single string. `ClaimStrings` is an alias to `[]string`, but with custom decoding behavior. This means all the built in `aud` validation behavior now expects `[]string` instead of `string`. This was a common request. + + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. 
+ +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/claim_strings.go b/vendor/github.com/dgrijalva/jwt-go/v4/claim_strings.go new file mode 100644 index 00000000000..057b684da04 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/claim_strings.go @@ -0,0 +1,47 @@ +package jwt + +import ( + "encoding/json" + "reflect" +) + +// ClaimStrings is used for parsing claim properties that +// can be either a string or array of strings +type ClaimStrings []string + +// ParseClaimStrings is used to produce a ClaimStrings value +// from the various forms it may present during encoding/decoding +func ParseClaimStrings(value interface{}) (ClaimStrings, error) { + switch v := value.(type) { + case string: + return ClaimStrings{v}, nil + case []string: + return ClaimStrings(v), nil + case []interface{}: + result := make(ClaimStrings, 0, len(v)) + for i, vv := range v { + if x, ok := vv.(string); ok { + result = append(result, x) + } else { + return nil, &json.UnsupportedTypeError{Type: reflect.TypeOf(v[i])} + } + } + return result, nil + case nil: + return nil, nil + default: + return nil, &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } +} + +// UnmarshalJSON implements the json package's Unmarshaler interface +func (c *ClaimStrings) UnmarshalJSON(data []byte) error { + var value interface{} + err := json.Unmarshal(data, &value) + if err != nil { + return err + } + + *c, err = ParseClaimStrings(value) + return err +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/claims.go b/vendor/github.com/dgrijalva/jwt-go/v4/claims.go new file mode 100644 index 00000000000..a065328bc2b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/claims.go @@ -0,0 +1,64 @@ +package jwt + +// Claims is the interface used to hold the claims values of a token +// For a type to be a Claims object, it must have a Valid method that determines +// if the token is invalid for any supported reason +// Claims are parsed and encoded using the standard library's encoding/json +// package. Claims are passed directly to that.
+type Claims interface { + // A nil validation helper should use the default helper + Valid(*ValidationHelper) error +} + +// StandardClaims is a structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience ClaimStrings `json:"aud,omitempty"` + ExpiresAt *Time `json:"exp,omitempty"` + ID string `json:"jti,omitempty"` + IssuedAt *Time `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore *Time `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates standard claims using ValidationHelper +// Validates time based claims "exp, nbf" (see: WithLeeway) +// Validates "aud" if present in claims. (see: WithAudience, WithoutAudienceValidation) +// Validates "iss" if option is provided (see: WithIssuer) +func (c StandardClaims) Valid(h *ValidationHelper) error { + var vErr error + + if h == nil { + h = DefaultValidationHelper + } + + if err := h.ValidateExpiresAt(c.ExpiresAt); err != nil { + vErr = wrapError(err, vErr) + } + + if err := h.ValidateNotBefore(c.NotBefore); err != nil { + vErr = wrapError(err, vErr) + } + + if err := h.ValidateAudience(c.Audience); err != nil { + vErr = wrapError(err, vErr) + } + + if err := h.ValidateIssuer(c.Issuer); err != nil { + vErr = wrapError(err, vErr) + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +func (c *StandardClaims) VerifyAudience(h *ValidationHelper, cmp string) error { + return h.ValidateAudienceAgainst(c.Audience, cmp) +} + +// VerifyIssuer compares the iss claim against cmp. +func (c *StandardClaims) VerifyIssuer(h *ValidationHelper, cmp string) error { + return h.ValidateIssuerAgainst(c.Issuer, cmp) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/doc.go b/vendor/github.com/dgrijalva/jwt-go/v4/doc.go new file mode 100644 index 00000000000..a86dc1a3b34 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa.go
new file mode 100644
index 00000000000..9a0d6083453
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa.go
@@ -0,0 +1,173 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"encoding/asn1"
+	"fmt"
+	"math/big"
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Mirrors the struct from crypto/ecdsa, we expect ecdsa.PrivateKey.Sign function to return this struct asn1 encoded
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+// Specific instances for EC256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+// Alg implements SigningMethod
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements the Verify method from SigningMethod
+// For this verify method, key must be an *ecdsa.PublicKey or a crypto.Signer
+// whose public key is an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	var ok bool
+
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	case crypto.Signer:
+		pub := k.Public()
+		if ecdsaKey, ok = pub.(*ecdsa.PublicKey); !ok {
+			return &InvalidKeyError{Message: fmt.Sprintf("crypto.Signer returned an unexpected public key type: %T", pub)}
+		}
+	default:
+		return NewInvalidKeyTypeError("*ecdsa.PublicKey or crypto.Signer", key)
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return &UnverfiableTokenError{Message: "signature length is invalid"}
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
+		return nil
+	}
+	return new(InvalidSignatureError)
+}
+
+// Sign implements the Sign method from SigningMethod
+// For this signing method, key must be an *ecdsa.PrivateKey or another
+// crypto.Signer whose public key is an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	var signer crypto.Signer
+	var pub *ecdsa.PublicKey
+	var ok bool
+
+	if signer, ok = key.(crypto.Signer); !ok {
+		return "", NewInvalidKeyTypeError("*ecdsa.PrivateKey or crypto.Signer", key)
+	}
+
+	// sanity check that the signer is an ecdsa signer
+	if pub, ok = signer.Public().(*ecdsa.PublicKey); !ok {
+		return "", &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", signer.Public())}
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	asn1Sig, err := signer.Sign(rand.Reader, hasher.Sum(nil), m.Hash)
+	if err != nil {
+		return "", err
+	}
+
+	// the ecdsa.PrivateKey Sign function returns an ASN.1 encoded signature, which is not what we want,
+	// so we unmarshal it to get r and s and encode those as described in RFC 7518 section 3.4
+	var ecdsaSig ecdsaSignature
+	rest, err := asn1.Unmarshal(asn1Sig, &ecdsaSig)
+	if err != nil {
+		return "", err
+	}
+
+	if len(rest) != 0 {
+		return "", &UnverfiableTokenError{Message: "unexpected extra bytes in ecdsa signature"}
+	}
+
+	curveBits := pub.Curve.Params().BitSize
+
+	if m.CurveBits != curveBits {
+		return "", &InvalidKeyError{Message: "CurveBits in public key don't match those in signing method"}
+	}
+
+	keyBytes := curveBits / 8
+	if curveBits%8 > 0 {
+		keyBytes++
+	}
+
+	// We serialize the output (r and s) into big-endian byte arrays and pad
+	// them with zeros on the left to make sure the sizes work out. Both arrays
+	// must be keyBytes long, and the output must be 2*keyBytes long.
+	rBytes := ecdsaSig.R.Bytes()
+	rBytesPadded := make([]byte, keyBytes)
+	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+	sBytes := ecdsaSig.S.Bytes()
+	sBytesPadded := make([]byte, keyBytes)
+	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+	out := append(rBytesPadded, sBytesPadded...)
+
+	return EncodeSegment(out), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa_utils.go
new file mode 100644
index 00000000000..627e7db3cf3
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa_utils.go
@@ -0,0 +1,70 @@
+package jwt
+
+import (
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+// Errors returned by EC signing methods
+var (
+	ErrNotECPublicKey  = errors.New("key is not a valid ECDSA public key")
+	ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM is a helper function for
+// parsing a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+		return nil, err
+	}
+
+	var pkey *ecdsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, ErrNotECPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM is a helper function for
+// parsing a PEM encoded PKIX public key (or a certificate containing one)
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, ErrNotECPublicKey
+	}
+
+	return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/errors.go b/vendor/github.com/dgrijalva/jwt-go/v4/errors.go
new file mode 100644
index 00000000000..d7a53ba7f7f
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/errors.go
@@ -0,0 +1,204 @@
+package jwt
+
+import (
+	"fmt"
+	"time"
+)
+
+// Error constants
+var (
+	ErrHashUnavailable = new(HashUnavailableError)
+)
+
+// Embeds b within a, if a is a valid wrapper. Returns a.
+// If a is not a valid wrapper, b is dropped.
+// If one of the errors is nil, the other is returned.
+func wrapError(a, b error) error {
+	if b == nil {
+		return a
+	}
+	if a == nil {
+		return b
+	}
+
+	type iErrorWrapper interface {
+		Wrap(error)
+	}
+	if w, ok := a.(iErrorWrapper); ok {
+		w.Wrap(b)
+	}
+	return a
+}
+
+// ErrorWrapper provides a simple, concrete helper for implementing nestable errors
+type ErrorWrapper struct{ err error }
+
+// Unwrap implements xerrors.Wrapper
+func (w ErrorWrapper) Unwrap() error {
+	return w.err
+}
+
+// Wrap stores the provided error value and returns it when Unwrap is called.
+// The pointer receiver is required so the wrapped value is actually retained.
+func (w *ErrorWrapper) Wrap(err error) {
+	w.err = err
+}
+
+// InvalidKeyError is returned if the key is unusable for some reason other than type
+type InvalidKeyError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidKeyError) Error() string {
+	return fmt.Sprintf("key is invalid: %v", e.Message)
+}
+
+// InvalidKeyTypeError is returned if the key is unusable because it is of an incompatible type
+type InvalidKeyTypeError struct {
+	Expected, Received string // String descriptions of expected and received types
+	ErrorWrapper
+}
+
+func (e *InvalidKeyTypeError) Error() string {
+	if e.Expected == "" && e.Received == "" {
+		return "key is of invalid type"
+	}
+	return fmt.Sprintf("key is of invalid type: expected %v, received %v", e.Expected, e.Received)
+}
+
+// NewInvalidKeyTypeError creates an InvalidKeyTypeError, automatically capturing the type
+// of received
+func NewInvalidKeyTypeError(expected string, received interface{}) error {
+	return &InvalidKeyTypeError{Expected: expected, Received: fmt.Sprintf("%T", received)}
+}
+
+// MalformedTokenError means the token failed to parse or exhibits some other
+// non-standard property that prevents it being processed by this library
+type MalformedTokenError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *MalformedTokenError) Error() string {
+	if e.Message == "" {
+		return "token is malformed"
+	}
+	return fmt.Sprintf("token is malformed: %v", e.Message)
+}
+
+// UnverfiableTokenError means there's something wrong with the signature that prevents
+// this library from verifying it.
+type UnverfiableTokenError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *UnverfiableTokenError) Error() string {
+	if e.Message == "" {
+		return "token is unverifiable"
+	}
+	return fmt.Sprintf("token is unverifiable: %v", e.Message)
+}
+
+// InvalidSignatureError means the signature on the token is invalid
+type InvalidSignatureError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidSignatureError) Error() string {
+	if e.Message == "" {
+		return "token signature is invalid"
+	}
+	return fmt.Sprintf("token signature is invalid: %v", e.Message)
+}
+
+// TokenExpiredError means the token failed the 'exp' check. It reports how long
+// the token had been expired when it was evaluated.
+// A client system may have a bug that doesn't refresh a token in time, or there
+// may be clock skew, so this information can help you understand what went wrong.
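+//
+// A hedged sketch of how a caller might detect this condition with the standard
+// library's errors.As (Go 1.13+; imports elided). This works because the
+// embedded ErrorWrapper supports unwrapping:
+//
+//	var expErr *jwt.TokenExpiredError
+//	if errors.As(err, &expErr) {
+//		log.Printf("token expired %v ago", expErr.ExpiredBy)
+//	}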
+type TokenExpiredError struct {
+	At        time.Time     // The time at which the exp was evaluated. Includes leeway.
+	ExpiredBy time.Duration // How long the token had been expired at time of evaluation
+	ErrorWrapper            // Value for unwrapping
+}
+
+func (e *TokenExpiredError) Error() string {
+	return fmt.Sprintf("token is expired by %v", e.ExpiredBy)
+}
+
+// TokenNotValidYetError means the token failed the 'nbf' check. It's possible
+// this token will become valid once the 'nbf' time is reached. If you are encountering
+// this unexpectedly, you may want to provide a bit of Leeway to account for clock skew. See WithLeeway
+type TokenNotValidYetError struct {
+	At      time.Time     // The time at which the nbf was evaluated. Includes leeway.
+	EarlyBy time.Duration // How early the token was at time of evaluation
+	ErrorWrapper          // Value for unwrapping
+}
+
+func (e *TokenNotValidYetError) Error() string {
+	return fmt.Sprintf("token is not valid yet; wait %v", e.EarlyBy)
+}
+
+// InvalidAudienceError means the token failed the audience check.
+// Per the spec, if an 'aud' claim is present, the value must be verified.
+// See: WithAudience and WithoutAudienceValidation
+type InvalidAudienceError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidAudienceError) Error() string {
+	if e.Message == "" {
+		return "token audience is invalid"
+	}
+	return fmt.Sprintf("token audience is invalid: %v", e.Message)
+}
+
+// InvalidIssuerError means the token failed issuer validation.
+// Issuer validation is only run, by default, if the WithIssuer option is provided
+type InvalidIssuerError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidIssuerError) Error() string {
+	if e.Message == "" {
+		return "token issuer is invalid"
+	}
+	return fmt.Sprintf("token issuer is invalid: %v", e.Message)
+}
+
+// InvalidClaimsError is a catchall type for claims errors that don't have their own type
+type InvalidClaimsError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidClaimsError) Error() string {
+	if e.Message == "" {
+		return "token claim is invalid"
+	}
+	return fmt.Sprintf("token claim is invalid: %v", e.Message)
+}
+
+// SigningError is a catchall type for signing errors
+type SigningError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *SigningError) Error() string {
+	if e.Message == "" {
+		return "error encountered during signing"
+	}
+	return fmt.Sprintf("error encountered during signing: %v", e.Message)
+}
+
+// HashUnavailableError means the requested hash function isn't available
+// See: https://godoc.org/crypto#Hash.Available
+type HashUnavailableError struct {
+	ErrorWrapper
+}
+
+func (e *HashUnavailableError) Error() string {
+	return "the requested hash function is unavailable"
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/go.mod b/vendor/github.com/dgrijalva/jwt-go/v4/go.mod
new file mode 100644
index 00000000000..284213dea7c
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/go.mod
@@ -0,0 +1,5 @@
+module github.com/dgrijalva/jwt-go/v4
+
+go 1.12
+
+require golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/go.sum b/vendor/github.com/dgrijalva/jwt-go/v4/go.sum
new file mode 100644
index 00000000000..3ab73eafaed
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/hmac.go b/vendor/github.com/dgrijalva/jwt-go/v4/hmac.go new file mode 100644 index 00000000000..2c9101257e9 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/hmac.go @@ -0,0 +1,97 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// SigningMethodHMAC implements the HMAC-SHA family of signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +// Alg implements SigningMethod +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return NewInvalidKeyTypeError("[]byte", key) + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Sign implements the Sign method from SigningMethod +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + keyBytes, ok := key.([]byte) + if !ok { + return "", NewInvalidKeyTypeError("[]byte", key) + } + + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/keyfunc.go b/vendor/github.com/dgrijalva/jwt-go/v4/keyfunc.go new file mode 100644 index 00000000000..2f15a8fc94e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/keyfunc.go @@ -0,0 +1,21 @@ +package jwt + +import "fmt" + +// Keyfunc is the type passed to Parse methods to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. 
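+//
+// An illustrative sketch (keyStore is a hypothetical map, not part of this
+// package; imports elided) of selecting a verification key by the token's
+// kid header:
+//
+//	var byKID jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) {
+//		kid, _ := t.Header["kid"].(string)
+//		if key, ok := keyStore[kid]; ok {
+//			return key, nil
+//		}
+//		return nil, fmt.Errorf("unknown kid %q", kid)
+//	}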
+type Keyfunc func(*Token) (interface{}, error) + +// KnownKeyfunc is a helper for generating a Keyfunc from a known +// signing method and key. If your implementation only supports a single signing method +// and key, this is for you. +func KnownKeyfunc(signingMethod SigningMethod, key interface{}) Keyfunc { + return func(t *Token) (interface{}, error) { + if signingMethod.Alg() != t.Header["alg"] { + return nil, fmt.Errorf("unexpected signing method: %v, expected: %v", t.Header["alg"], signingMethod.Alg()) + } + return key, nil + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/v4/map_claims.go new file mode 100644 index 00000000000..d721c3388cf --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/map_claims.go @@ -0,0 +1,83 @@ +package jwt + +// MapClaims is the Claims type that uses the map[string]interface{} for JSON decoding +// This is the default Claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience compares the aud claim against cmp. +func (m MapClaims) VerifyAudience(h *ValidationHelper, cmp string) error { + if aud, err := ParseClaimStrings(m["aud"]); err == nil && aud != nil { + return h.ValidateAudienceAgainst(aud, cmp) + } else if err != nil { + return &MalformedTokenError{Message: "couldn't parse 'aud' value"} + } + return nil +} + +// VerifyIssuer compares the iss claim against cmp. +func (m MapClaims) VerifyIssuer(h *ValidationHelper, cmp string) error { + iss, ok := m["iss"].(string) + if !ok { + return &InvalidIssuerError{Message: "'iss' expected but not present"} + } + return h.ValidateIssuerAgainst(iss, cmp) +} + +// Valid validates standard claims using ValidationHelper +// Validates time based claims "exp, nbf" (see: WithLeeway) +// Validates "aud" if present in claims. (see: WithAudience, WithoutAudienceValidation) +// Validates "iss" if option is provided (see: WithIssuer) +func (m MapClaims) Valid(h *ValidationHelper) error { + var vErr error + + if h == nil { + h = DefaultValidationHelper + } + + exp, err := m.LoadTimeValue("exp") + if err != nil { + return err + } + + if err = h.ValidateExpiresAt(exp); err != nil { + vErr = wrapError(err, vErr) + } + + nbf, err := m.LoadTimeValue("nbf") + if err != nil { + return err + } + + if err = h.ValidateNotBefore(nbf); err != nil { + vErr = wrapError(err, vErr) + } + + // Try to parse the 'aud' claim + if aud, err := ParseClaimStrings(m["aud"]); err == nil && aud != nil { + // If it's present and well formed, validate + if err = h.ValidateAudience(aud); err != nil { + vErr = wrapError(err, vErr) + } + } else if err != nil { + // If it's present and not well formed, return an error + return &MalformedTokenError{Message: "couldn't parse 'aud' value"} + } + + iss, _ := m["iss"].(string) + if err = h.ValidateIssuer(iss); err != nil { + vErr = wrapError(err, vErr) + } + + return vErr +} + +// LoadTimeValue extracts a *Time value from a key in m +func (m MapClaims) LoadTimeValue(key string) (*Time, error) { + value, ok := m[key] + if !ok { + // No value present in map + return nil, nil + } + + return ParseTime(value) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/none.go b/vendor/github.com/dgrijalva/jwt-go/v4/none.go new file mode 100644 index 00000000000..a5caed37b48 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/none.go @@ -0,0 +1,54 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. 
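+//
+// If unsigned tokens really must be accepted, the Keyfunc has to opt in
+// explicitly by returning the magic constant (a sketch, not an endorsement):
+//
+//	t, err := jwt.Parse(s, func(*jwt.Token) (interface{}, error) {
+//		return jwt.UnsafeAllowNoneSignatureType, nil
+//	})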
+var SigningMethodNone *signingMethodNone
+
+// UnsafeAllowNoneSignatureType must be returned from Keyfunc in order for the
+// none signing method to be allowed. This is intended to make it possible to use
+// this signing method, but not by accident
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+// NoneSignatureTypeDisallowedError is the error value returned when the none signing method
+// is used without UnsafeAllowNoneSignatureType
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+	SigningMethodNone = &signingMethodNone{}
+	NoneSignatureTypeDisallowedError = &InvalidSignatureError{Message: "'none' signature type is not allowed"}
+
+	RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+		return SigningMethodNone
+	})
+}
+
+func (m *signingMethodNone) Alg() string {
+	return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+	// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+	// accepting 'none' signing method
+	if _, ok := key.(unsafeNoneMagicConstant); !ok {
+		return NoneSignatureTypeDisallowedError
+	}
+	// If signing method is none, signature must be an empty string
+	if signature != "" {
+		return &InvalidSignatureError{Message: "'none' signing method with non-empty signature"}
+	}
+
+	// Accept 'none' signing method.
+	return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+	if _, ok := key.(unsafeNoneMagicConstant); ok {
+		return "", nil
+	}
+	return "", NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/parser.go b/vendor/github.com/dgrijalva/jwt-go/v4/parser.go
new file mode 100644
index 00000000000..1f1a9c09e77
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/parser.go
@@ -0,0 +1,168 @@
+package jwt
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Parser is the type used to parse and validate a JWT token from a string
+type Parser struct {
+	validMethods         []string          // If populated, only these methods will be considered valid
+	useJSONNumber        bool              // Use JSON Number format in JSON decoder
+	skipClaimsValidation bool              // Skip claims validation during token parsing
+	unmarshaller         TokenUnmarshaller // Use this instead of encoding/json
+	*ValidationHelper
+}
+
+// NewParser returns a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+	p := &Parser{
+		ValidationHelper: new(ValidationHelper),
+	}
+	for _, option := range options {
+		option(p)
+	}
+	return p
+}
+
+// Parse will parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims is just like Parse, but with the claims type specified
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+	token, parts, err := p.ParseUnverified(tokenString, claims)
+	if err != nil {
+		return token, err
+	}
+
+	// Verify signing method is in the required set
+	if p.validMethods != nil {
+		var signingMethodValid = false
+		var alg = token.Method.Alg()
+		for _, m := range p.validMethods {
+			if m == alg {
+				signingMethodValid = true
+				break
+			}
+		}
+		if !signingMethodValid {
+			// signing method is not in the listed set
+			return token, &UnverfiableTokenError{Message: fmt.Sprintf("signing method %v is invalid", alg)}
+		}
+	}
+
+	// Lookup key
+	var key interface{}
+	if keyFunc == nil {
+		// keyFunc was not provided. Short-circuiting validation
+		return token, &UnverfiableTokenError{Message: "no Keyfunc was provided."}
+	}
+	if key, err = keyFunc(token); err != nil {
+		// keyFunc returned an error
+		return token, wrapError(&UnverfiableTokenError{Message: "Keyfunc returned an error"}, err)
+	}
+
+	var vErr error
+
+	// Perform validation
+	token.Signature = parts[2]
+	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+		vErr = wrapError(&InvalidSignatureError{}, err)
+	}
+
+	// Validate Claims
+	if !p.skipClaimsValidation && vErr == nil {
+		if err := token.Claims.Valid(p.ValidationHelper); err != nil {
+			vErr = wrapError(err, vErr)
+		}
+	}
+
+	if vErr == nil {
+		token.Valid = true
+	}
+
+	return token, vErr
+}
+
+// ParseUnverified is used to inspect a token without validating it
+// WARNING: Don't use this method unless you know what you're doing
+//
+// This method parses the token but doesn't validate the signature. It's only
+// ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from
+// it. Or for debuggery.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+	parts = strings.Split(tokenString, ".")
+	if len(parts) != 3 {
+		return nil, parts, &MalformedTokenError{Message: "token contains an invalid number of segments"}
+	}
+
+	token = &Token{Raw: tokenString}
+
+	// choose unmarshaller
+	var unmarshaller = p.unmarshaller
+	if unmarshaller == nil {
+		unmarshaller = p.defaultUnmarshaller
+	}
+
+	// parse Header
+	var headerBytes []byte
+	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+			return token, parts, &MalformedTokenError{Message: "tokenstring should not contain 'bearer '"}
+		}
+		return token, parts, wrapError(&MalformedTokenError{Message: "failed to decode token header"}, err)
+	}
+	if err = unmarshaller(CodingContext{HeaderFieldDescriptor, nil}, headerBytes, &token.Header); err != nil {
+		return token, parts, wrapError(&MalformedTokenError{Message: "failed to unmarshal token header"}, err)
+	}
+
+	// parse Claims
+	var claimBytes []byte
+	token.Claims = claims
+
+	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+		return token, parts, wrapError(&MalformedTokenError{Message: "failed to decode token claims"}, err)
+	}
+	// JSON Decode. Special case for map type to avoid weird pointer behavior
+	ctx := CodingContext{ClaimsFieldDescriptor, token.Header}
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = unmarshaller(ctx, claimBytes, &c)
+	} else {
+		err = unmarshaller(ctx, claimBytes, &claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, parts, wrapError(&MalformedTokenError{Message: "failed to unmarshal token claims"}, err)
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, parts, &UnverfiableTokenError{Message: "signing method (alg) is unavailable."}
+		}
+	} else {
+		return token, parts, &UnverfiableTokenError{Message: "signing method (alg) is unspecified."}
+	}
+
+	return token, parts, nil
+}
+
+func (p *Parser) defaultUnmarshaller(ctx CodingContext, data []byte, v interface{}) error {
+	// If we don't need a special parser, use Unmarshal
+	// We never use a special encoder for the header
+	if !p.useJSONNumber || ctx.FieldDescriptor == HeaderFieldDescriptor {
+		return json.Unmarshal(data, v)
+	}
+
+	// To enable the JSONNumber mode, we must use Decoder instead of Unmarshal
+	dec := json.NewDecoder(bytes.NewBuffer(data))
+	dec.UseNumber()
+	return dec.Decode(v)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/parser_option.go b/vendor/github.com/dgrijalva/jwt-go/v4/parser_option.go
new file mode 100644
index 00000000000..fd285ebe972
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/parser_option.go
@@ -0,0 +1,74 @@
+package jwt
+
+import "time"
+
+// ParserOption implements functional options for parser behavior
+// see: https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type ParserOption func(*Parser)
+
+// WithValidMethods returns the ParserOption for specifying valid signing methods
+func WithValidMethods(valid []string) ParserOption {
+	return func(p *Parser) {
+		p.validMethods = valid
+	}
+}
+
+// WithJSONNumber returns the ParserOption for using json.Number instead of float64 when parsing
+// numeric values. Used most commonly with MapClaims, but it can be useful in some cases with
+// structured claims types
+func WithJSONNumber() ParserOption {
+	return func(p *Parser) {
+		p.useJSONNumber = true
+	}
+}
+
+// WithoutClaimsValidation returns the ParserOption for disabling claims validation.
+// This does not disable signature validation. Use this if you intend to implement
+// claims validation via other means
+func WithoutClaimsValidation() ParserOption {
+	return func(p *Parser) {
+		p.skipClaimsValidation = true
+	}
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(d time.Duration) ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.leeway = d
+	}
+}
+
+// WithAudience returns the ParserOption for specifying an expected aud member value
+func WithAudience(aud string) ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.audience = &aud
+	}
+}
+
+// WithoutAudienceValidation returns the ParserOption that specifies audience check should be skipped
+func WithoutAudienceValidation() ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.skipAudience = true
+	}
+}
+
+// WithIssuer returns the ParserOption that specifies a value to compare against the iss claim
+func WithIssuer(iss string) ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.issuer = &iss
+	}
+}
+
+// TokenUnmarshaller is the function signature required to supply custom JSON decoding logic.
+// It is the same as json.Unmarshal with the addition of the CodingContext.
+// The field value will let your unmarshaller know which field is being processed.
+// This is to facilitate things like compression, where you wouldn't want to compress
+// the head.
+type TokenUnmarshaller func(ctx CodingContext, data []byte, v interface{}) error
+
+// WithUnmarshaller returns the ParserOption that replaces the default decoder with
+// the one specified
+func WithUnmarshaller(um TokenUnmarshaller) ParserOption {
+	return func(p *Parser) {
+		p.unmarshaller = um
+	}
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/rsa.go b/vendor/github.com/dgrijalva/jwt-go/v4/rsa.go
new file mode 100644
index 00000000000..72ba2d6632b
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/rsa.go
@@ -0,0 +1,115 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+// Alg implements the Alg method from SigningMethod
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements the Verify method from SigningMethod
+// For this verify method, the key must be an *rsa.PublicKey structure
+// or a crypto.Signer whose public key is an *rsa.PublicKey
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	case crypto.Signer:
+		pub := k.Public()
+		if rsaKey, ok = pub.(*rsa.PublicKey); !ok {
+			return &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)}
+		}
+	default:
+		return NewInvalidKeyTypeError("*rsa.PublicKey or crypto.Signer", key)
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements the Sign method from SigningMethod
+// For this signing method, the key must be an *rsa.PrivateKey structure
+// or another crypto.Signer whose public key is an *rsa.PublicKey
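+//
+// A usage sketch, assuming privateKey was loaded with ParseRSAPrivateKeyFromPEM
+// (an *rsa.PrivateKey satisfies crypto.Signer) and signingString is the
+// "header.claims" input produced by SigningString:
+//
+//	sig, err := jwt.SigningMethodRS256.Sign(signingString, privateKey)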
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var signer crypto.Signer
+	var ok bool
+
+	if signer, ok = key.(crypto.Signer); !ok {
+		return "", NewInvalidKeyTypeError("*rsa.PrivateKey or crypto.Signer", key)
+	}
+
+	// sanity check that the signer is an rsa signer
+	if _, ok := signer.Public().(*rsa.PublicKey); !ok {
+		return "", &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", signer.Public())}
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	sigBytes, err := signer.Sign(rand.Reader, hasher.Sum(nil), m.Hash)
+	if err != nil {
+		return "", err
+	}
+	return EncodeSegment(sigBytes), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_pss.go
new file mode 100644
index 00000000000..aa3ba39973a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_pss.go
@@ -0,0 +1,138 @@
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA256,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA384,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA512,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Verify implements the Verify method from SigningMethod
+// For this verify method, key must be an *rsa.PublicKey or a crypto.Signer
+// whose public key is an *rsa.PublicKey
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	case crypto.Signer:
+		pub := k.Public()
+		if rsaKey, ok = pub.(*rsa.PublicKey); !ok {
+			return &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)}
+		}
+	default:
+		return NewInvalidKeyTypeError("*rsa.PublicKey or crypto.Signer", key)
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Sign implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey or another
+// crypto.Signer whose public key is an *rsa.PublicKey
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+	var signer crypto.Signer
+	var ok bool
+
+	if signer, ok = key.(crypto.Signer); !ok {
+		return "", NewInvalidKeyTypeError("*rsa.PrivateKey or crypto.Signer", key)
+	}
+
+	// sanity check that the signer is an rsa signer
+	if _, ok := signer.Public().(*rsa.PublicKey); !ok {
+		return "", &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", signer.Public())}
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	sigBytes, err := signer.Sign(rand.Reader, hasher.Sum(nil), m.Options)
+	if err != nil {
+		return "", err
+	}
+	return EncodeSegment(sigBytes), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_utils.go
new file mode 100644
index 00000000000..b0dae4bfb33
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_utils.go
@@ -0,0 +1,105 @@
+package jwt
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+// Errors returned by RSA Signing Method and helpers
+var (
+	ErrKeyMustBePEMEncoded = errors.New("invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
+	ErrNotRSAPrivateKey    = errors.New("key is not a valid RSA private key")
+	ErrNotRSAPublicKey     = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM is a helper method for
+// parsing a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword is a helper method for
+// parsing a PEM encoded PKCS1 or PKCS8 private key, encrypted with a password
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+
+	var blockDecrypted []byte
+	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+		return nil, err
+	}
+
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM is a helper method for
+// parsing a PEM encoded PKIX public key (or a certificate containing one)
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err =
x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/v4/signing_method.go new file mode 100644 index 00000000000..5e50243ef3c --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/signing_method.go @@ -0,0 +1,37 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// SigningMethod is the interface used for signing and verifying tokens +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// RegisterSigningMethod stores the "alg" name and a factory function pair +// used internally for looking up a signing method based on "alg". +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// GetSigningMethod returns the signing method registered by RegisterSigningMethod +// This is used by the library internally during parsing and validation. +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/signing_option.go b/vendor/github.com/dgrijalva/jwt-go/v4/signing_option.go new file mode 100644 index 00000000000..cecead29708 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/signing_option.go @@ -0,0 +1,38 @@ +package jwt + +// CodingContext provides context to TokenMarshaller and TokenUnmarshaller +type CodingContext struct { + FieldDescriptor // Which field are we encoding/decoding? + Header map[string]interface{} // The token Header, if available +} + +// FieldDescriptor describes which field is being processed. Used by CodingContext +// This is to enable the marshaller to treat the head and body differently +type FieldDescriptor uint8 + +// Constants describe which field is being processed by custom Marshaller +const ( + HeaderFieldDescriptor FieldDescriptor = 0 + ClaimsFieldDescriptor FieldDescriptor = 1 +) + +// SigningOption can be passed to signing related methods on Token to customize behavior +type SigningOption func(*signingOptions) + +type signingOptions struct { + marshaller TokenMarshaller +} + +// TokenMarshaller is the interface you must implement to provide custom JSON marshalling +// behavior. It is the same as json.Marshal with the addition of the FieldDescriptor. +// The field value will let your marshaller know which field is being processed. +// This is to facilitate things like compression, where you wouldn't want to compress +// the head. 
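+//
+// A minimal conforming sketch that defers to encoding/json and only singles
+// out the claims segment, where custom handling such as compression would go
+// (token and key are assumed to be in scope; imports elided):
+//
+//	m := func(ctx jwt.CodingContext, v interface{}) ([]byte, error) {
+//		if ctx.FieldDescriptor == jwt.ClaimsFieldDescriptor {
+//			// custom claims handling could go here
+//		}
+//		return json.Marshal(v)
+//	}
+//	signed, err := token.SignedString(key, jwt.WithMarshaller(m))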
+type TokenMarshaller func(ctx CodingContext, v interface{}) ([]byte, error)
+
+// WithMarshaller returns a SigningOption that will tell the signing code to use your custom Marshaller
+func WithMarshaller(m TokenMarshaller) SigningOption {
+	return func(o *signingOptions) {
+		o.marshaller = m
+	}
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/time.go b/vendor/github.com/dgrijalva/jwt-go/v4/time.go
new file mode 100644
index 00000000000..aee72aeeeb7
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/time.go
@@ -0,0 +1,78 @@
+package jwt
+
+import (
+	"encoding/json"
+	"reflect"
+	"time"
+)
+
+// TimePrecision determines how precisely time is measured
+// by this library. When serializing and deserializing tokens,
+// time values are automatically truncated to this precision.
+// See the time package's Truncate method for more detail
+const TimePrecision = time.Microsecond
+
+// Time is how this library represents time values. It's mostly
+// a wrapper for the standard library's time.Time, but adds
+// specialized JSON decoding behavior to interop with the way
+// time is represented by JWT. Also makes it possible to represent
+// nil values.
+type Time struct {
+	time.Time
+}
+
+// NewTime creates a new Time value from a float64, following
+// the JWT spec.
+func NewTime(t float64) *Time {
+	return At(time.Unix(0, int64(t*float64(time.Second))))
+}
+
+// Now returns a new Time value using the current time.
+// You can override Now by changing the value of TimeFunc
+func Now() *Time {
+	return At(TimeFunc())
+}
+
+// At makes a Time value from a standard library time.Time value
+func At(at time.Time) *Time {
+	return &Time{at.Truncate(TimePrecision)}
+}
+
+// ParseTime is used for creating a Time value from various
+// possible representations that can occur in serialization.
+func ParseTime(value interface{}) (*Time, error) {
+	switch v := value.(type) {
+	case int64:
+		return NewTime(float64(v)), nil
+	case float64:
+		return NewTime(v), nil
+	case json.Number:
+		vv, err := v.Float64()
+		if err != nil {
+			return nil, err
+		}
+		return NewTime(vv), nil
+	case nil:
+		return nil, nil
+	default:
+		return nil, &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+	}
+}
+
+// UnmarshalJSON implements the json package's Unmarshaler interface
+func (t *Time) UnmarshalJSON(data []byte) error {
+	var value json.Number
+	err := json.Unmarshal(data, &value)
+	if err != nil {
+		return err
+	}
+	v, err := ParseTime(value)
+	if err != nil {
+		// Verify the parse succeeded before dereferencing v, which is nil on error
+		return err
+	}
+	*t = *v
+	return nil
+}
+
+// MarshalJSON implements the json package's Marshaler interface
+func (t *Time) MarshalJSON() ([]byte, error) {
+	f := float64(t.Truncate(TimePrecision).UnixNano()) / float64(time.Second)
+	return json.Marshal(f)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/token.go b/vendor/github.com/dgrijalva/jwt-go/v4/token.go
new file mode 100644
index 00000000000..5ab1eb55346
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/token.go
@@ -0,0 +1,110 @@
+package jwt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+	"time"
+)
+
+// TimeFunc provides the current time when parsing a token to validate the "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Token represents a JWT token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+	Raw       string                 // The raw token. Populated when you Parse a token
+	Method    SigningMethod          // The signing method used or to be used
+	Header    map[string]interface{} // The first segment of the token
+	Claims    Claims                 // The second segment of the token
+	Signature string                 // The third segment of the token. Populated when you Parse a token
+	Valid     bool                   // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// New creates a new Token. Takes a signing method. Uses the default claims type, MapClaims.
+func New(method SigningMethod) *Token {
+	return NewWithClaims(method, MapClaims{})
+}
+
+// NewWithClaims creates a new token with a specified signing method and claims type
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+	return &Token{
+		Header: map[string]interface{}{
+			"typ": "JWT",
+			"alg": method.Alg(),
+		},
+		Claims: claims,
+		Method: method,
+	}
+}
+
+// SignedString returns the complete, signed token
+func (t *Token) SignedString(key interface{}, opts ...SigningOption) (string, error) {
+	var sig, sstr string
+	var err error
+	if sstr, err = t.SigningString(opts...); err != nil {
+		return "", err
+	}
+	if sig, err = t.Method.Sign(sstr, key); err != nil {
+		return "", err
+	}
+	return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString(opts ...SigningOption) (string, error) {
+	// Process options
+	var cfg = new(signingOptions)
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	// Setup default marshaller
+	if cfg.marshaller == nil {
+		cfg.marshaller = t.defaultMarshaller
+	}
+
+	// Encode the two parts, then combine
+	inputParts := []interface{}{t.Header, t.Claims}
+	parts := make([]string, 2)
+	for i, v := range inputParts {
+		ctx := CodingContext{FieldDescriptor(i), t.Header}
+		jsonValue, err := cfg.marshaller(ctx, v)
+		if err != nil {
+			return "", err
+		}
+		parts[i] = EncodeSegment(jsonValue)
+	}
+	return strings.Join(parts, "."), nil
+}
+
+func (t *Token) defaultMarshaller(ctx CodingContext, v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Parse will parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+// Claims type will be the default, MapClaims
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+	return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is Parse, but with a specified Claims type
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+	return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment is used internally for JWT specific base64url encoding with padding stripped
+func EncodeSegment(seg []byte) string {
+	return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// DecodeSegment is used internally for JWT specific base64url encoding with padding stripped
+func DecodeSegment(seg string) ([]byte, error) {
+	return base64.RawURLEncoding.DecodeString(seg)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/validation_helper.go b/vendor/github.com/dgrijalva/jwt-go/v4/validation_helper.go
new file mode 100644
index 00000000000..2edd0a46476
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/validation_helper.go
@@ -0,0 +1,151 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// DefaultValidationHelper is used by Claims.Valid if none is provided
+var DefaultValidationHelper = &ValidationHelper{}
+
+// ValidationHelper is built by the parser and passed
+// to Claims.Valid to carry parse/validation options.
+// This standalone type exists to allow implementations to do whatever custom
+// behavior is required while still being able to call upon the standard behavior
+// as necessary.
+type ValidationHelper struct {
+	nowFunc      func() time.Time // Override for time.Now. Mostly used for testing
+	leeway       time.Duration    // Leeway to provide when validating time values
+	audience     *string          // Expected audience value
+	skipAudience bool             // Ignore aud check
+	issuer       *string          // Expected issuer value. ignored if nil
+}
+
+// NewValidationHelper creates a validation helper from a list of parser options
+// Not all parser options will impact validation
+// If you already have a custom parser, you can use its ValidationHelper value
+// instead of creating a new one
+func NewValidationHelper(options ...ParserOption) *ValidationHelper {
+	p := NewParser(options...)
+	return p.ValidationHelper
+}
+
+func (h *ValidationHelper) now() time.Time {
+	if h.nowFunc != nil {
+		return h.nowFunc()
+	}
+	return TimeFunc()
+}
+
+// Before returns true if Now is before t
+// Takes leeway into account
+func (h *ValidationHelper) Before(t time.Time) bool {
+	return h.now().Before(t.Add(-h.leeway))
+}
+
+// After returns true if Now is after t
+// Takes leeway into account
+func (h *ValidationHelper) After(t time.Time) bool {
+	return h.now().After(t.Add(h.leeway))
+}
+
+// ValidateExpiresAt returns an error if the expiration time is invalid
+// Takes leeway into account
+func (h *ValidationHelper) ValidateExpiresAt(exp *Time) error {
+	// 'exp' claim is not set. ignore.
+	if exp == nil {
+		return nil
+	}
+
+	// Expiration has passed
+	if h.After(exp.Time) {
+		delta := h.now().Sub(exp.Time)
+		return &TokenExpiredError{At: h.now(), ExpiredBy: delta}
+	}
+
+	// Expiration has not passed
+	return nil
+}
+
+// ValidateNotBefore returns an error if the nbf time has not been reached
+// Takes leeway into account
+func (h *ValidationHelper) ValidateNotBefore(nbf *Time) error {
+	// 'nbf' claim is not set. ignore.
+	if nbf == nil {
+		return nil
+	}
+
+	// Nbf hasn't been reached
+	if h.Before(nbf.Time) {
+		delta := nbf.Time.Sub(h.now())
+		return &TokenNotValidYetError{At: h.now(), EarlyBy: delta}
+	}
+	// Nbf has been reached. valid.
+	return nil
+}
+
+// ValidateAudience verifies that aud contains the audience value provided
+// by the WithAudience option.
+// Per the spec (https://tools.ietf.org/html/rfc7519#section-4.1.3), if the aud
+// claim is present, the value must be verified.
+func (h *ValidationHelper) ValidateAudience(aud ClaimStrings) error {
+	// Skip flag
+	if h.skipAudience {
+		return nil
+	}
+
+	// If there's no audience claim, ignore
+	if len(aud) == 0 {
+		return nil
+	}
+
+	// If there is an audience claim, but no value provided, fail
+	if h.audience == nil {
+		return &InvalidAudienceError{Message: "audience value was expected but not provided"}
+	}
+
+	return h.ValidateAudienceAgainst(aud, *h.audience)
+}
+
+// ValidateAudienceAgainst checks that the compare value is included in the aud list
+// It is used by ValidateAudience, but exposed as a helper for other implementations
+func (h *ValidationHelper) ValidateAudienceAgainst(aud ClaimStrings, compare string) error {
+	if aud == nil {
+		return nil
+	}
+
+	// Compare provided value with aud claim.
+	// This code avoids the early return to make this check more or less constant time.
+	// I'm not certain that's actually required in this context.
+	var match = false
+	for _, audStr := range aud {
+		if subtle.ConstantTimeCompare([]byte(audStr), []byte(compare)) == 1 {
+			match = true
+		}
+	}
+	if !match {
+		return &InvalidAudienceError{Message: fmt.Sprintf("'%v' wasn't found in aud claim", compare)}
+	}
+	return nil
+}
+
+// ValidateIssuer checks the claim value against the value provided by WithIssuer
+func (h *ValidationHelper) ValidateIssuer(iss string) error {
+	// Always passes validation if issuer is not provided
+	if h.issuer == nil {
+		return nil
+	}
+
+	return h.ValidateIssuerAgainst(iss, *h.issuer)
+}
+
+// ValidateIssuerAgainst checks the claim value against the value provided, ignoring the WithIssuer value
+func (h *ValidationHelper) ValidateIssuerAgainst(iss string, compare string) error {
+	if subtle.ConstantTimeCompare([]byte(iss), []byte(compare)) == 1 {
+		return nil
+	}
+
+	return &InvalidIssuerError{Message: "'iss' value doesn't match expectation"}
+}
diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS
index ecb6251ba0d..04edcf794e2 100644
--- a/vendor/github.com/docker/cli/AUTHORS
+++ b/vendor/github.com/docker/cli/AUTHORS
@@ -11,7 +11,6 @@ Abin Shahab
 Ace Tang
 Addam Hardy
 Adolfo Ochagavía
-Adrian Plata
 Adrien Duermael
 Adrien Folie
 Ahmet Alp Balkan
@@ -137,7 +136,6 @@ Dafydd Crosby
 dalanlan
 Damien Nadé
 Dan Cotora
-Daniel Cassidy
 Daniel Dao
 Daniel Farrell
 Daniel Gasienica
@@ -217,7 +215,6 @@ Felix Rabe
 Filip Jareš
 Flavio Crisciani
 Florian Klein
-Forest Johnson
 Foysal Iqbal
 François Scala
 Fred Lifton
@@ -234,7 +231,6 @@ George MacRorie
 George Xie
 Gianluca Borello
 Gildas Cuisinier
-Goksu Toprak
 Gou Rao
 Grant Reaber
 Greg Pflaum
@@ -355,7 +351,6 @@ Kara Alexandra
 Kareem Khazem
 Karthik Nayak
 Kat Samperi
-Kathryn Spiers
 Katie McLaughlin
 Ke Xu
 Kei Ohmura
@@ -377,6 +372,7 @@ Krasi Georgiev
 Kris-Mikael Krister
 Kun Zhang
 Kunal Kushwaha
+Kyle Spiers
 Lachlan Cooper
 Lai Jiangshan
 Lars Kellogg-Stedman
@@ -541,7 +537,6 @@ Qiang Huang
 Qinglan Peng
 qudongfang
 Raghavendra K T
-Ravi Shekhar Jethani
 Ray Tsang
 Reficul
 Remy Suen
@@ -558,7 +553,6 @@ Robin Naundorf
 Robin Speekenbrink
 Rodolfo Ortiz
 Rogelio Canedo
-Rohan Verma Roland Kammerer Roman Dudin Rory Hunter @@ -707,7 +701,6 @@ Yuan Sun Yue Zhang Yunxiang Huang Zachary Romero -Zander Mackie zebrilee Zhang Kun Zhang Wei diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE index 58b19b6d15b..0c74e15b057 100644 --- a/vendor/github.com/docker/cli/NOTICE +++ b/vendor/github.com/docker/cli/NOTICE @@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). -This product contains software (https://github.com/creack/pty) developed +This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 6e4d73dfad7..f5e33f2b3e9 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -106,13 +106,9 @@ func Load(configDir string) (*configfile.ConfigFile, error) { } // Can't find latest config file so check for the old one - homedir, err := os.UserHomeDir() - if err != nil { - return configFile, errors.Wrap(err, oldConfigfile) - } - confFile := filepath.Join(homedir, oldConfigfile) + confFile := filepath.Join(homedir.Get(), oldConfigfile) if _, err := os.Stat(confFile); err != nil { - return configFile, nil // missing file is not an error + return configFile, nil //missing file is not an error } file, err := os.Open(confFile) if err != nil { diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index a4e97a5caa6..388a5d54d69 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -196,9 +196,6 @@ func (configFile *ConfigFile) Save() error { os.Remove(temp.Name()) return err } - // Try copying the current config file (if any) ownership and permissions - copyFilePermissions(configFile.Filename, temp.Name()) - return os.Rename(temp.Name(), configFile.Filename) } diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go deleted file mode 100644 index 3ca65c6140d..00000000000 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package configfile - -import ( - "os" - "syscall" -) - -// copyFilePermissions copies file ownership and permissions from "src" to "dst", -// ignoring any error during the process. 
-func copyFilePermissions(src, dst string) { - var ( - mode os.FileMode = 0600 - uid, gid int - ) - - fi, err := os.Stat(src) - if err != nil { - return - } - if fi.Mode().IsRegular() { - mode = fi.Mode() - } - if err := os.Chmod(dst, mode); err != nil { - return - } - - uid = int(fi.Sys().(*syscall.Stat_t).Uid) - gid = int(fi.Sys().(*syscall.Stat_t).Gid) - - if uid > 0 && gid > 0 { - _ = os.Chown(dst, uid, gid) - } -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go deleted file mode 100644 index 42fffc39ad2..00000000000 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package configfile - -func copyFilePermissions(src, dst string) { - // TODO implement for Windows -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index ad166ba8df6..246e2a33f5b 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -4,32 +4,22 @@ Aanand Prasad Aaron Davidson Aaron Feng -Aaron Hnatiw Aaron Huslage -Aaron L. Xu Aaron Lehmann Aaron Welch -Aaron.L.Xu Abel Muiño Abhijeet Kasurde -Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda -Abhishek Sharma Abin Shahab Adam Avilla -Adam Dobrawy -Adam Eijdenberg Adam Kunk Adam Miller Adam Mills -Adam Pointer Adam Singer Adam Walz -Addam Hardy Aditi Rajagopal Aditya -Adnan Khan Adolfo Ochagavía Adria Casas Adrian Moisey @@ -41,108 +31,81 @@ Ahmed Kamal Ahmet Alp Balkan Aidan Feldman Aidan Hobson Sayers -AJ Bowen +AJ Bowen Ajey Charantimath ajneu -Akash Gupta -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle +Akihiro Suda Al Tobey alambike Alan Scherger Alan Thompson Albert Callarisa Albert Zhang -Alejandro González Hevia Aleksa Sarai Aleksandrs Fadins Alena Prokharchyk -Alessandro Boch +Alessandro Boch Alessio Biancalana Alex Chan -Alex Chen Alex Coventry Alex Crawford Alex Ellis Alex Gaynor -Alex Goodman Alex Olshansky Alex Samorukov Alex Warhawk Alexander Artemenko Alexander Boyd Alexander Larsson -Alexander Midlash Alexander Morozov Alexander Shopov Alexandre Beslic -Alexandre Garnier Alexandre González -Alexandre Jomin Alexandru Sfirlogea -Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin Alexis THOMAS -Alfred Landrum Ali Dehghani -Alicia Lauerman -Alihan Demir Allen Madsen -Allen Sun +Allen Sun almoehi Alvaro Saurin -Alvin Deng Alvin Richards amangoel Amen Belayneh -Amir Goldstein Amit Bakshi Amit Krishnan Amit Shukla -Amr Gawish Amy Lindburg Anand Patil AnandkumarPatel Anatoly Borodin Anchal Agrawal -Anda Xu Anders Janmyr Andre Dublin <81dublin@gmail.com> Andre Granovsky -Andrea Denisse Gómez Andrea Luzzardi Andrea Turli -Andreas Elvers Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan -Andrei Vagin Andrew C. 
Bodine Andrew Clay Shafer Andrew Duckworth Andrew France Andrew Gerrand Andrew Guenther -Andrew He -Andrew Hsu Andrew Kuklewicz Andrew Macgregor Andrew Macpherson Andrew Martin -Andrew McDonnell Andrew Munsell -Andrew Pennebaker Andrew Po -Andrew Weiss +Andrew Weiss Andrew Williams Andrews Medina -Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins @@ -157,16 +120,12 @@ Andy Wilson Anes Hasicic Anil Belur Anil Madhavapeddy -Ankit Jain Ankush Agarwal Anonmily -Anran Qiao -Anshul Pundir Anthon van der Neut Anthony Baire Anthony Bishopric Anthony Dahanne -Anthony Sottile Anton Löfgren Anton Nikitin Anton Polonskiy @@ -177,21 +136,16 @@ Antony Messerli Anuj Bahuguna Anusha Ragunathan apocas -Arash Deshmeh ArikaChen -Arko Dasgupta Arnaud Lefebvre Arnaud Porterie -Arnaud Rebillout Arthur Barr Arthur Gautier Artur Meyster Arun Gupta -Asad Saeeduddin Asbjørn Enge averagehuman Avi Das -Avi Kivity Avi Miller Avi Vaid ayoshitake @@ -202,50 +156,38 @@ Barry Allard Bartłomiej Piotrowski Bastiaan Bakker bdevloed -Ben Bonnefoy Ben Firshman Ben Golub -Ben Gould Ben Hall Ben Sargent Ben Severson Ben Toews Ben Wiklund Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Yolken Benoit Chesneau Bernerd Schaefer -Bernhard M. Wiedemann Bert Goethals -Bevisy Zhang Bharath Thiruveedula Bhiraj Butala -Bhumika Bayani Bilal Amarni -Bill Wang -Bily Zhang -Bin Liu -Bingshen Wang +Bill W +bin liu Blake Geno Boaz Shuster bobby abbott -Boris Pruessmann -Boshi Lian +boucher Bouke Haarsma Boyd Hemphill boynux Bradley Cicenas Bradley Wright Brandon Liu -Brandon Philips +Brandon Philips Brandon Rhodes Brendan Dixon Brent Salisbury Brett Higgins Brett Kochendorfer -Brett Randall Brian (bex) Exelbierd Brian Bland Brian DeHamer @@ -254,22 +196,20 @@ Brian Flad Brian Goff Brian McCallister Brian Olsen -Brian Schwind Brian Shumate Brian Torres-Gil Brian Trump Brice Jaglin Briehan Lombaard -Brielle Broder Bruno Bigras Bruno Binet Bruno Gazzera Bruno Renié -Bruno Tavares Bryan Bess Bryan Boreham Bryan Matsuo Bryan Murphy +buddhamagnet Burke Libbey Byung Kang Caleb Spare @@ -282,22 +222,16 @@ Cao Weiwei Carl Henrik Lunde Carl Loa Odin Carl X. Su -Carlo Mion Carlos Alexandro Becker -Carlos de Paula Carlos Sanchez Carol Fager-Higgins Cary Casey Bisson -Catalin Pirvu -Ce Gao Cedric Davies Cezar Sa Espinola Chad Swenson Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang +Chander G Charles Chan Charles Hooper Charles Law @@ -305,58 +239,39 @@ Charles Lindsay Charles Merriam Charles Sarrazin Charles Smith -Charlie Drage Charlie Lewis Chase Bolt ChaYoung You Chen Chao -Chen Chuanliang Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -chenyuzhu -Chetan Birajdar +cheney90 Chewey Chia-liang Kao chli Cholerae Hu Chris Alfonso Chris Armstrong -Chris Dias Chris Dituri Chris Fordham -Chris Gavin -Chris Gibson Chris Khoo -Chris McKinnel Chris McKinnel -Chris Price Chris Seto Chris Snow Chris St. Pierre Chris Stivers Chris Swan -Chris Telfer Chris Wahl Chris Weyl -Chris White +chrismckinnel Christian Berendt -Christian Brauner Christian Böhme -Christian Muehlhaeuser Christian Persson Christian Rotzoll Christian Simon Christian Stefanescu +ChristoperBiscardi Christophe Mehay Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone Christopher Currie Christopher Jones Christopher Latham @@ -366,26 +281,19 @@ Chun Chen Ciro S. 
Costa Clayton Coleman Clinton Kitson -Cody Roseborough Coenraad Loubser Colin Dunklau -Colin Hebert -Colin Panisset Colin Rice Colin Walters Collin Guarino Colm Hally companycy -Corbin Coleman -Corey Farrell Cory Forsyth cressie176 CrimsonGlory Cristian Staretu cristiano balducci Cruceru Calin-Cristian -CUI Wei -Cyprian Gracz Cyril F Daan van Berkel Daehyeok Mun @@ -405,18 +313,14 @@ Dan Keder Dan Levy Dan McPherson Dan Stine +Dan Walsh Dan Williams -Dani Hodovic -Dani Louca Daniel Antlinger -Daniel Dao Daniel Exner Daniel Farrell Daniel Garcia Daniel Gasienica -Daniel Grunwell Daniel Hiltgen -Daniel J Walsh Daniel Menet Daniel Mizyrycki Daniel Nephin @@ -424,29 +328,23 @@ Daniel Norberg Daniel Nordberg Daniel Robinson Daniel S -Daniel Sweet Daniel Von Fange -Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang +Daniel, Dao Quang Minh Danny Berger -Danny Milosavljevic Danny Yates -Danyal Khaliq Darren Coxall Darren Shepherd Darren Stahl -Dattatraya Kumbhar Davanum Srinivas Dave Barboza -Dave Goodchild Dave Henderson Dave MacDonald Dave Tucker David Anderson David Calavera -David Chung David Corking David Cramer David Currie @@ -454,40 +352,30 @@ David Davis David Dooling David Gageot David Gebler -David Glasser David Lawrence David Lechner David M. Karr David Mackey David Mat David Mcanulty -David McKay -David P Hilton David Pelaez David R. Jenni David Röthlisberger -David Sheets +David Sheets David Sissitka David Trott -David Wang <00107082@163.com> -David Williamson David Xia David Young Davide Ceretti Dawn Chen dbdd dcylabs -Debayan De -Deborah Gertrude Digges +decadent deed02392 -Deep Debroy Deng Guangxing Deni Bertovic -Denis Defreyne Denis Gladkikh Denis Ollier -Dennis Chen -Dennis Chen Dennis Docter Derek Derek @@ -498,18 +386,13 @@ Deshi Xiao devmeyster Devvyn Murphy Dharmit Shah -Dhawal Yogesh Bhanushali -Diego Romero -Diego Siqueira Dieter Reuter Dillon Dixon Dima Stopel Dimitri John Ledkov -Dimitris Mandalidis Dimitris Rozakis Dimitry Andric Dinesh Subhraveti -Ding Fei Diogo Monica DiuDiugirl Djibril Koné @@ -518,16 +401,10 @@ Dmitri Logvinenko Dmitri Shuralyov Dmitry Demeshchuk Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel Dominik Finkbeiner Dominik Honnef Don Kirkby @@ -535,18 +412,15 @@ Don Kjer Don Spaulding Donald Huang Dong Chen -Donghwa Kim Donovan Jones Doron Podoleanu Doug Davis Doug MacEachern Doug Tangren -Douglas Curtis Dr Nic Williams dragon788 Dražen Lučanin Drew Erny -Drew Hubl Dustin Sallings Ed Costello Edmund Wagner @@ -554,112 +428,85 @@ Eiichi Tsukata Eike Herzbach Eivin Giske Skaaren Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö +Elan Ruusamäe Elias Probst Elijah Zupancic eluck Elvir Kuric -Emil Davtyan Emil Hernvall Emily Maier Emily Rose Emir Ozer Enguerran Eohyung Lee -epeterso Eric Barch -Eric Curtin -Eric G. Noriega Eric Hanchrow Eric Lee Eric Myhre Eric Paris Eric Rafaloff -Eric Rosenberg +Eric Rosenberg Eric Sage -Eric Soderstrom +Eric Windisch Eric Yang Eric-Olivier Lamey -Erica Windisch Erik Bray Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen -Erik St. 
Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh -Ethan Bell -Ethan Mosbaugh -Euan Kemp -Eugen Krizo +Euan Eugene Yakubovich +eugenkrizo +evalle Evan Allrich Evan Carmi Evan Hazlett Evan Krall Evan Phoenix Evan Wies -Evelyn Xu Everett Toews -Evgeny Shmarnev Evgeny Vereshchagin Ewa Czechowska Eystein Måløy Stenberg ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz Fabiano Rosas Fabio Falci -Fabio Kung Fabio Rapposelli Fabio Rehm Fabrizio Regini Fabrizio Soppelsa Faiz Khan falmp -Fangming Fang Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun Fareed Dudhia Fathi Boudra Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis Felix Geisendörfer -Felix Hupfeld +Felix Hupfeld Felix Rabe -Felix Ruess +Felix Ruess Felix Schindler -Feng Yan -Fengtu Wang Ferenc Szabo Fernando Fero Volar Ferran Rodenas Filipe Brandenburger Filipe Oliveira +fl0yd Flavio Castelli -Flavio Crisciani +FLGMwt Florian Florian Klein Florian Maier -Florian Noeding Florian Weingarten Florin Asavoaie -Florin Patan fonglh -Foysal Iqbal +fortinux Francesc Campoy -Francesco Mari Francis Chuang Francisco Carriedo Francisco Souza @@ -667,47 +514,34 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -frankyang Fred Lifton Frederick F. Kautz IV Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter -Frieder Bluemle -Fu JinLin +frosforever +fy2462 Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy -Gabriel Linder Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele +GabrielNicolasAvellaneda Galen Sampson -Gang Qiao Gareth Rushgrove Garrett Barboza -Gary Schaetz Gaurav gautam, prasanna -Gaël PORTAY -Genki Takiuchi GennadySpb Geoffrey Bachelet -Geon Kim -George Kontridze George MacRorie George Xie Georgi Hristozov Gereon Frey German DZ Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini Gianluca Borello Gildas Cuisinier -Giovan Isa Musthofa gissehel Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy @@ -715,64 +549,43 @@ Gleb M Borisov Glyn Normington GoBella Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy Gosuke Miyashita -Gou Rao +Gou Rao Govinda Fichtner -Grant Millar Grant Reaber Graydon Hoare Greg Fausak -Greg Pflaum -Greg Stephens Greg Thornton -Grzegorz Jaśkiewicz +grossws +grunny +gs11 Guilhem Lettron Guilherme Salgado Guillaume Dufour Guillaume J. Charmes guoxiuyan -Guri Gurjeet Singh Guruprasad -Gustav Sinder gwx296173 Günter Zöchbauer -Haichao Yang -haikuoliu -Hakan Özler -Hamish Hutchings Hans Kristian Flaatten Hans Rødtang Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers -Harald Niesche Harley Laue Harold Cooper -Harrison Turton Harry Zhang -Harshal Patil -Harshal Patil He Simei -He Xiaoxi -He Xin heartlock <21521209@zju.edu.cn> Hector Castro -Helen Xie Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa Hobofan Hollie Teal Hong Xu -Hongbin Lu -Hongxu Jia hsinko <21551195@zju.edu.cn> Hu Keping Hu Tao -HuanHuan Ye Huanzhong Zhang Huayi Zhang Hugo Duncan @@ -781,53 +594,38 @@ Hunter Blanks huqun Huu Nguyen hyeongkyu.lee -Hyzhou Zhy -Iago López Galeiras +hyp3rdino +Hyzhou <1187766782@qq.com> Ian Babrou Ian Bishop Ian Bull Ian Calvert -Ian Campbell -Ian Chen Ian Lee Ian Main -Ian Philpot Ian Truslove Iavael Icaro Seara -Ignacio Capurro Igor Dolzhikov -Igor Karpovich -Iliana Weller Ilkka Laukkanen Ilya Dmitrichenko Ilya Gusev -Ilya Khlopotov +ILYA Khlopotov imre Fitos inglesp Ingo Gottwald -Innovimax Isaac Dupree Isabel Jimenez Isao Jonas -Iskander Sharipov Ivan Babrou Ivan Fraixedes Ivan Grcic -Ivan Markin J Bruni J. 
Nunn Jack Danger Canty -Jack Laxson Jacob Atzen Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaivish Kothari Jake Champlin Jake Moshenko -Jake Sanders jakedt James Allen James Carey @@ -838,11 +636,9 @@ James Kyburz James Kyle James Lal James Mills -James Nesbitt James Nugent James Turnbull -James Watkins-Harvey -Jamie Hannaford +Jamie Hannaford Jamshid Afshar Jan Keromnes Jan Koprowski @@ -874,16 +670,11 @@ jaxgeller Jay Jay Jay Kamat -Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido -Jean-Christophe Berthon Jean-Paul Calderone -Jean-Pierre Huynh Jean-Tiare Le Bigot -Jeeva S. Chelladhurai Jeff Anderson -Jeff Hajewski Jeff Johnston Jeff Lindsay Jeff Mickey @@ -895,42 +686,32 @@ Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske -Jeremy Chambers Jeremy Grosser Jeremy Price Jeremy Qian Jeremy Unruh -Jeremy Yallop -Jeroen Franse Jeroen Jacobs Jesse Dearing Jesse Dubay -Jessica Frazelle +Jessica Frazelle Jezeniel Zapanta +jgeiger Jhon Honce Ji.Zhilong -Jian Liao Jian Zhang -Jiang Jinyang -Jie Luo -Jihyun Hwang +jianbosun Jilles Oldenbeuving Jim Alateras -Jim Ehrismann -Jim Galasyn -Jim Minter Jim Perrin Jimmy Cuadra Jimmy Puckett -Jimmy Song +jimmyxian Jinsoo Park -Jintao Zhang -Jiri Appl Jiri Popelka -Jiuyue Ma Jiří Župka +jjy +jmzwcn Joao Fernandes -Joao Trindade Joe Beda Joe Doliner Joe Ferguson @@ -941,7 +722,6 @@ Joel Friedly Joel Handwell Joel Hansson Joel Wurtz -Joey Geiger Joey Geiger Joey Gibson Joffrey F @@ -953,28 +733,18 @@ John Costa John Feminella John Gardiner Myers John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen +John Howard (VM) John OBrien III John Starks -John Stephens John Tims -John V. Martinez John Warwick John Willis -Jon Johnson -Jon Surrell +johnharris85 Jon Wedaman -Jonas Dohse Jonas Pfenniger -Jonathan A. Schweder Jonathan A. Sternberg Jonathan Boulle Jonathan Camp -Jonathan Choy Jonathan Dowland Jonathan Lebon Jonathan Lomas @@ -983,62 +753,47 @@ Jonathan Mueller Jonathan Pares Jonathan Rudenberg Jonathan Stoppani -Jonh Wendell -Joni Sar Joost Cassee +Jordan Jordan Arentsen -Jordan Jennings Jordan Sissel -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez +Jose Diaz-Gonzalez Joseph Anthony Pasquale Holsten Joseph Hager Joseph Kern -Joseph Rothrock Josh Josh Bodah -Josh Bonczkowski Josh Chorlton -Josh Eveleth Josh Hawn Josh Horwitz Josh Poimboeuf -Josh Soref -Josh Wilson Josiah Kiehl José Tomás Albornoz -Joyce Jang JP +jrabbit Julian Taylor Julien Barbier Julien Bisconti Julien Bordellier Julien Dubois -Julien Kassar -Julien Maitrehenry Julien Pervillé Julio Montes Jun-Ru Chang Jussi Nummelin Justas Brazauskas -Justen Martin Justin Cormack Justin Force -Justin Menga Justin Plock Justin Simonelis Justin Terry Justyn Temme Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni +Jérôme Petazzoni Jörg Thalheim -K. Heller Kai Blin -Kai Qiang Wu (Kennan) +Kai Qiang Wu(Kennan) Kamil Domański -Kamjar Gerami +kamjar gerami Kanstantsin Shautsou Kara Alexandra Karan Lyons @@ -1046,112 +801,88 @@ Kareem Khazem kargakis Karl Grzeszczak Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston Katie McLaughlin Kato Kazuyoshi Katrina Owen Kawsar Saiyeed -Kay Yan kayrus -Kazuhiro Sera -Ke Li Ke Xu -Kei Ohmura Keith Hudgins Keli Hu Ken Cochrane Ken Herner Ken ICHIKAWA -Ken Reese Kenfe-Mickaël Laventure Kenjiro Nakayama Kent Johnson -Kenta Tada Kevin "qwazerty" Houdebert Kevin Burke Kevin Clark -Kevin Feyrer Kevin J. Lynagh Kevin Jing Qiu -Kevin Kern Kevin Menard -Kevin Meredith Kevin P. 
Kucharczyk -Kevin Parsons Kevin Richardson Kevin Shi Kevin Wallace Kevin Yap +kevinmeredith Keyvan Fatehi kies Kim BKC Carlbacker Kim Eik Kimbro Staken -Kir Kolyshkin +Kir Kolyshkin Kiran Gangadharan +Kirill Kolyshkin Kirill SIbirev knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine -Konstantin Gribov Konstantin L Konstantin Pelykh -Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova -Krystian Wojcicki +krrg Kun Zhang -Kunal Kushwaha -Kunal Tyagi +Kunal Kushwaha Kyle Conroy Kyle Linden -Kyle Wuolle kyu Lachlan Coote Lai Jiangshan Lajos Papp Lakshan Perera Lalatendu Mohanty +lalyos Lance Chen Lance Kinley Lars Butler Lars Kellogg-Stedman Lars R. Damerow -Lars-Magnus Skog Laszlo Meszaros -Laura Frank Laurent Erignoux Laurie Voss Leandro Siqueira Lee Chao <932819864@qq.com> Lee, Meng-Han leeplay -Lei Gong Lei Jitang Len Weincier Lennie -Leo Gallucci Leszek Kowalski Levi Blackstone Levi Gross -Lewis Daly Lewis Marshall Lewis Peckover -Li Yi Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo +liaoqingwei limsy Lin Lu LingFaKe @@ -1160,68 +891,50 @@ Liran Tal Liron Levin Liu Bo Liu Hua -liwenqi lixiaobing10051267 -Liz Zhang LIZAO LI -Lizzie Dixon <_@lizzie.io> Lloyd Dewolf Lokesh Mandvekar longliqiang88 <394564827@qq.com> Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos +Lorenzo Fontana Louis Opter -Luca Favatella Luca Marturana Luca Orlandi Luca-Bogdan Grigorescu Lucas Chan Lucas Chi -Lucas Molas -Lucas Silvestre Luciano Mores Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren Lukas Waslowski lukaspustina Lukasz Zajaczkowski -Luke Marsden -Lyn +lukemarsden Lynda O'Leary -lzhfromutsc Lénaïc Huard -Ma Müller Ma Shimiao Mabin -Madhan Raj Mookkandy Madhav Puri Madhu Venugopal -Mageee +Mageee <21521230.zju.edu.cn> Mahesh Tiyyagura malnick Malte Janduda +manchoz Manfred Touron Manfred Zabarauskas -Manjunath A Kumatagi Mansi Nahar +mansinahar Manuel Meurer -Manuel Rüger Manuel Woelker mapk0y Marc Abramowitz Marc Kuo Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino Marcelo Salazar Marco Hennings -Marcus Cobden Marcus Farkas Marcus Linke -Marcus Martins Marcus Ramberg Marek Goldmann Marian Marinov @@ -1231,35 +944,24 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen -Mark Jeromin Mark McGranaghan Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker Mark West -Markan Patel Marko Mikulicic Marko Tibold Markus Fix -Markus Kortlang Martijn Dwars Martijn van Oosterhout Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen -Martin Muzatko Martin Redmond Mary Anthony Masahito Zembutsu -Masato Ohba -Masayuki Morita Mason Malone Mateusz Sulima Mathias Monnerville -Mathieu Champlon Mathieu Le Marec - Pasquet -Mathieu Parent Matt Apperson Matt Bachmann Matt Bentley @@ -1268,21 +970,17 @@ Matt Hoyle Matt McCormick Matt Moore Matt Richardson -Matt Rickard Matt Robenolt -Matt Schurenko -Matt Williams Matthew Heon -Matthew Lapworth Matthew Mayer -Matthew Mosesohn Matthew Mueller Matthew Riley Matthias Klumpp Matthias Kühnle Matthias Rampke Matthieu Hauglustaine -Mattias Jernberg +mattymo +mattyw Mauricio Garavaglia mauriyouth Max Shytikov @@ -1291,8 +989,6 @@ Maxim Ivanov Maxim Kulkin Maxim Treskin Maxime Petazzoni -Maximiliano Maccanti -Maxwell Meaglith Ma meejah Megan Kostick @@ -1312,79 +1008,66 @@ Michael Friis Michael Gorsuch Michael Grauer Michael Holzheu -Michael Hudson-Doyle +Michael Hudson-Doyle Michael Huettermann -Michael Irwin Michael Käufl Michael Neale -Michael 
Nussbaum Michael Prokop Michael Scharf -Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies Michael West -Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala -Michal Minář +Michal Minar Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz -Michał Gryko -Michiel de Jong -Mickaël Fortunato -Mickaël Remars +Michiel@unhosted +Mickaël FORTUNATO Miguel Angel Fernández Miguel Morales Mihai Borobocea Mihuleacc Sergiu Mike Brown -Mike Casas Mike Chelen Mike Danese Mike Dillon Mike Dougherty -Mike Estes Mike Gaffney Mike Goelzer Mike Leone -Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev -Miklos Szegedi -Milind Chawre Miloslav Trmač mingqing Mingzhen Feng Misty Stanley-Jones Mitch Capper -Mizuki Urushida mlarcher Mohammad Banikazemi -Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni -Moorthy RS Morgan Bauer Morgante Pell Morgy93 Morten Siebuhr Morton Fox Moysés Borges -mrfly +mqliang Mrunal Patel -Muayyad Alsadi +msabansal +mschurenko +muge Mustafa Akın Muthukumar R Máximo Cuadros Médi-Rémi Hashim -Nace Oroz Nahum Shalman Nakul Pathak Nalin Dahyabhai @@ -1392,7 +1075,6 @@ Nan Monnand Deng Naoki Orii Natalie Parker Natanael Copa -Natasha Jarus Nate Brennand Nate Eagleson Nate Jones @@ -1401,83 +1083,66 @@ Nathan Kleyn Nathan LeClaire Nathan McCauley Nathan Williams -Naveed Jamil Neal McBurnett -Neil Horman Neil Peterson Nelson Chen Neyazul Haque Nghia Tran Niall O'Higgins Nicholas E. Rabenau -Nick Adcock +nick Nick DeCoursin Nick Irvine -Nick Neisen Nick Parker Nick Payne -Nick Russo Nick Stenning Nick Stinemates -NickrenREN Nicola Kabar -Nicolas Borboën -Nicolas De Loof +Nicolas Borboën +Nicolas De loof Nicolas Dudebout Nicolas Goy Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet Nicolás Hock Isaza Nigel Poulton -Nik Nyby -Nikhil Chawla NikolaMandic -Nikolas Garofil -Nikolay Milovanov +nikolas Nirmal Mehta Nishant Totla NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE noducks Nolan Darilek -Noriki Nakamura nponeccop Nuutti Kotivuori nzwsch O.S. Tezer objectified -Odin Ugedal +OddBloke +odk- Oguz Bilgic Oh Jinkyun Ohad Schneider ohmystack Ole Reifschneider Oliver Neal -Oliver Reason Olivier Gambier Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv Oriol Francès +orkaa Oskar Niburski Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos +oyld +ozlerhakan +paetling +pandrew +panticz Paolo G. 
Giarrusso -Pascal -Pascal Bach Pascal Borreli Pascal Hartig Patrick Böänziger Patrick Devine Patrick Hemmer Patrick Stapleton -Patrik Cyvoct pattichen Paul paul @@ -1487,7 +1152,6 @@ Paul Bowsher Paul Furtado Paul Hammond Paul Jimenez -Paul Kehrer Paul Lietar Paul Liljenberg Paul Morie @@ -1495,31 +1159,24 @@ Paul Nasrat Paul Weaver Paulo Ribeiro Pavel Lobashov -Pavel Matěja -Pavel Pletenev Pavel Pospisil Pavel Sutyrin -Pavel Tikhomirov +Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik -Pawel Konczalski Peeyush Gupta Peggy Li Pei Su -Peng Tao Penghan Wang -Per Weijnitz perhapszzy@sina.com +pestophagous Peter Bourgon Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello +Peter Choi +Peter Dave Hello Peter Edge Peter Ericson Peter Esbensen -Peter Jaffe -Peter Kang Peter Malmgren Peter Salvatore Peter Volpe @@ -1528,13 +1185,10 @@ Petr Švihlík Phil Phil Estes Phil Spitler -Philip Alexander Etling Philip Monroe -Philipp Gillé Philipp Wahala Philipp Weissensteiner Phillip Alexander -phineas pidster Piergiuliano Bossi Pierre @@ -1547,28 +1201,22 @@ pixelistik Porjo Poul Kjeldager Sørensen Pradeep Chhetri -Pradip Dhara Prasanna Gautam -Pratik Karki Prayag Verma -Priya Wadhwa -Projjol Banerji Przemek Hejman -Pure White pysqz +qg <1373319223@qq.com> +qhuang Qiang Huang -Qinglan Peng -qudongfang +qq690388648 <690388648@qq.com> Quentin Brossard Quentin Perez Quentin Tayssier r0n22 -Radostin Stoyanov Rafal Jeczalik Rafe Colton Raghavendra K T Raghuram Devarakonda -Raja Sami Rajat Pandit Rajdeep Dua Ralf Sippl @@ -1577,19 +1225,15 @@ Ralph Bean Ramkumar Ramachandra Ramon Brooker Ramon van Alteren -RaviTeja Pothana -Ray Tsang +Ray Tsang ReadmeCritic Recursive Madman -Reficul Regan McCooey Remi Rampin -Remy Suen Renato Riccieri Santos Zannon -Renaud Gaubert +resouer +rgstephens Rhys Hiltner -Ri Xu -Ricardo N Feliciano Rich Moyse Rich Seymour Richard @@ -1607,22 +1251,17 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy -Rob Gulewich Rob Vesse Robert Bachmann Robert Bittle Robert Obryk -Robert Schneider Robert Stern -Robert Terhaar +Robert Terhaar Robert Wallis -Robert Wang Roberto G. Hashioka -Roberto Muñoz Fernández Robin Naundorf Robin Schneider Robin Speekenbrink -Robin Thoni robpc Rodolfo Carvalho Rodrigo Vaz @@ -1630,20 +1269,15 @@ Roel Van Nyen Roger Peppe Rohit Jnagal Rohit Kadam -Rohit Kapur -Rojin George Roland Huß Roland Kammerer Roland Moriz Roma Sokolov -Roman Dudin Roman Strashkin Ron Smits Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song root +root root root root @@ -1651,33 +1285,23 @@ Rory Hunter Rory McCune Ross Boucher Rovanion Luckey -Royce Remer Rozhnov Alexandr +rsmoorthy Rudolph Gottesheim -Rui Cao Rui Lopes -Ruilin Li Runshen Zhu -Russ Magee -Ryan Abrams Ryan Anderson Ryan Aslett Ryan Belgrave Ryan Detzel Ryan Fowler -Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto -Ryan Simmen -Ryan Stelly Ryan Thomas Ryan Trauntvein Ryan Wallner -Ryan Zhang -ryancooper7 RyanDeng -Ryo Nakao Rémy Greinhofer s. 
rannou s00318865 @@ -1685,8 +1309,7 @@ Sabin Basyal Sachin Joshi Sagar Hani Sainath Grandhi -Sakeven Jiang -Salahuddin Khan +sakeven Sally O'Malley Sam Abed Sam Alba @@ -1695,25 +1318,21 @@ Sam J Sharpe Sam Neirinck Sam Reis Sam Rijs -Sam Whited Sambuddha Basu Sami Wagiaalla Samuel Andaya Samuel Dion-Girardeau Samuel Karp Samuel PHAN -Sandeep Bansal Sankar சங்கர் Sanket Saurav Santhosh Manohar sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu Satnam Singh +satoru Satoshi Amemiya Satoshi Tagomori +scaleoutsean Scott Bessler Scott Collier Scott Johnston @@ -1722,11 +1341,8 @@ Scott Walls sdreyesg Sean Christopherson Sean Cronin -Sean Lee -Sean McIntyre Sean OMeara Sean P. Kane -Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn Senthil Kumar Selvaraj @@ -1736,50 +1352,36 @@ Seongyeol Lim Serge Hallyn Sergey Alekseev Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez Serhat Gülçiçek -SeungUkLee Sevki Hasirci Shane Canon Shane da Silva -Shaun Kaasten shaunol Shawn Landden Shawn Siefkas shawnhe -Shayne Wang Shekhar Gulati Sheng Yang Shengbo Song Shev Yan Shih-Yuan Lee Shijiang Wei -Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar -Shu-Wai Chow shuai-z Shukui Yang Shuwei Hao Sian Lerk Lau -Sidhartha Mani sidharthamani Silas Sewell -Silvan Jegen -Simão Reis Simei He -Simon Barendse Simon Eskildsen -Simon Ferquel Simon Leinen -Simon Menke Simon Taranto -Simon Vikstrom Sindhu S Sjoerd Langkemper -skanehira +skaasten Solganik Alexander Solomon Hykes Song Gao @@ -1790,63 +1392,47 @@ Spencer Smith Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko +srinsriv Steeve Morin Stefan Berger Stefan J. Wernli Stefan Praszalowicz -Stefan S. -Stefan Scherer +Stefan Scherer Stefan Staudenmeyer Stefan Weil -Stephan Spindler -Stephen Benjamin Stephen Crosby Stephen Day Stephen Drake Stephen Rust -Steve Desmond -Steve Dougherty Steve Durrheimer Steve Francia Steve Koch Steven Burgess Steven Erenst -Steven Hartland Steven Iveson Steven Merrill Steven Richards Steven Taylor -Stig Larsson Subhajit Ghosh Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade Sylvain Baubeau Sylvain Bellemare Sébastien -Sébastien HOUZÉ Sébastien Luttringer Sébastien Stormacq -Tabakhase Tadej Janež TAGOMORI Satoshi tang0th -Tangi Colin +Tangi COLIN Tatsuki Sugiura Tatsushi Inagaki -Taylan Isikdemir Taylor Jones +tbonza Ted M. Young Tehmasp Chaudhri -Tejaswini Duggaraju Tejesh Mehta terryding77 <550147740@qq.com> tgic @@ -1861,7 +1447,7 @@ Thomas Gazagnaire Thomas Grainger Thomas Hansen Thomas Leonard -Thomas Léveil +Thomas LEVEIL Thomas Orozco Thomas Riccardi Thomas Schroeter @@ -1869,26 +1455,21 @@ Thomas Sjögren Thomas Swift Thomas Tanaka Thomas Texier -Ti Zhou Tianon Gravi Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low -Tim -Tim Bart Tim Bosse Tim Dettrick Tim Düsterhus Tim Hockin -Tim Potter Tim Ruffles Tim Smith Tim Terhorst Tim Wang Tim Waugh Tim Wraight -Tim Zju <21651152@zju.edu.cn> timfeirg Timothy Hobbs tjwebb123 @@ -1905,14 +1486,11 @@ Todd Lunter Todd Whiteman Toli Kuznets Tom Barlow -Tom Booth Tom Denham Tom Fotherby Tom Howe Tom Hulihan Tom Maaswinkel -Tom Sweeney -Tom Wilkie Tom X. 
Tobin Tomas Tomecek Tomasz Kopczynski @@ -1920,37 +1498,31 @@ Tomasz Lipinski Tomasz Nurkiewicz Tommaso Visconti Tomáš Hrčka +Tonis Tiigi Tonny Xu -Tony Abboud Tony Daws Tony Miller toogley Torstein Husebø -Tõnis Tiigi tpng tracylihui <793912329@qq.com> -Trapier Marshall Travis Cline Travis Thieman Trent Ogren Trevor Trevor Pounds -Trevor Sullivan -Trishna Guha +trishnaguha Tristan Carel Troy Denton -Tycho Andersen Tyler Brock -Tyler Brown Tzu-Jung Lee -uhayate +Tõnis Tiigi Ulysse Carion -Umesh Yadav -Utz Bacher +unknown vagrant Vaidas Jablonskis -vanderliang Veres Lajos +vgeta Victor Algaze Victor Coisne Victor Costan @@ -1961,20 +1533,19 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K -Vikram bir Singh Viktor Stanchev Viktor Vojnovski VinayRaghavanKS Vincent Batts +Vincent Bernat Vincent Bernat -Vincent Demeester +Vincent Demeester Vincent Giersch Vincent Mayers Vincent Woo Vinod Kulkarni Vishal Doshi Vishnu Kannan -Vitaly Ostrosablin Vitor Monteiro Vivek Agarwal Vivek Dasgupta @@ -1985,41 +1556,24 @@ Vladimir Pouzanov Vladimir Rutsky Vladimir Varankin VladimirAus -Vlastimil Zeman Vojtech Vitek (V-Teq) waitingkuo Walter Leibbrandt Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping +WANG Chao Wang Xing -Wang Yuexiao Ward Vandewege WarheadsSE -Wassim Dhif Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu Wei-Ting Kuo -weipeng weiyan Weiyang Zhu Wen Cheng Ma Wendel Fleming -Wenjun Tang Wenkai Yin -Wentao Zhang Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang Wes Morgan -Wewang Xiaorenfine -Wiktor Kwapisiewicz Will Dietz Will Rouesnel Will Weaver @@ -2027,74 +1581,49 @@ willhf William Delanoue William Henry William Hubbs -William Martin William Riancho William Thurston WiseTrem +wlan0 Wolfgang Powisch +wonderflow Wonjun Kim xamyzhao -Xian Chaobo -Xianglin Gao Xianlu Bird -Xiao YongBiao XiaoBing Jiang -Xiaodong Zhang -Xiaoxi He Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinfeng Liu Xinzi Zhou Xiuming Chen -Xuecong Liao +xlgao-zju xuzhaokui -Yadnyawalkya Tale Yahya YAMADA Tsuyoshi -Yamasaki Masahide Yan Feng Yang Bai -Yang Pengfei -yangchenliang +yangshukui Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani Yasunori Mahata -Yazhong Liu Yestin Sun Yi EungJun Yibai Zhang Yihang Ho -Ying Li +Ying Li Yohei Ueda Yong Tang -Yongxin Li Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -youcai +yorkie Youcef YEKHLEF -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong Yuan Sun -Yuanhong Peng -Yue Zhang -Yuhao Fang -Yuichiro Kaneko -Yunxiang Huang +yuchangchun +yuchengxia +yuexiao-wang +YuPengZTE Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Junqueira +yuzou Zac Dover Zach Borboa -Zachary Jaffee +Zachary Jaffee Zain Memon Zaiste! 
Zane DeGraffenried @@ -2103,33 +1632,21 @@ Zen Lin(Zhinan Lin) Zhang Kun Zhang Wei Zhang Wentao -ZhangHang -zhangxianwei Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -Zhou Hao -Zhoulin Xie +zhouhao Zhu Guihua -Zhu Kunjia Zhuoyun Wei -Ziheng Liu Zilin Du zimbatm Ziming Dong ZJUshuaizhou <21551191@zju.edu.cn> zmarouf Zoltan Tombol -Zou Yu zqh -Zuhayr Elahi +Zuhayr Elahi Zunayed Ali Álex González Álvaro Lázaro Átila Camurça Alves 尹吉峰 -徐俊杰 -慕陶 搏通 -黄艳红00139573 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE index 6d8d58fb676..8f3fee627a4 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2018 Docker, Inc. + Copyright 2013-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE index 58b19b6d15b..8a37c1c7bc4 100644 --- a/vendor/github.com/docker/docker/NOTICE +++ b/vendor/github.com/docker/docker/NOTICE @@ -1,9 +1,9 @@ Docker -Copyright 2012-2017 Docker, Inc. +Copyright 2012-2016 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). -This product contains software (https://github.com/creack/pty) developed +This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 00000000000..8154e83f0c9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index 5e6310fdcd6..00000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,93 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" - "os" - "path/filepath" - "strings" -) - -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. -// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 67ab9e9b31e..00000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" -) - -// GetRuntimeDir is unsupported on non-linux system. -func GetRuntimeDir() (string, error) { - return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") -} - -// StickRuntimeDirContents is unsupported on non-linux system. -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") -} - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. 
-func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go deleted file mode 100644 index 441bd727b60..00000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build !windows - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - "os/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go deleted file mode 100644 index 2f81813b287..00000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 00000000000..df76d7d7716 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 00000000000..28e35169375 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,317 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +**Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +These global variables control the behavior of `jsonpatch.Apply`. + +An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior +is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. + +Structure `jsonpatch.ApplyOptions` includes the configuration options above +and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. + +When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore +`remove` operations whose `path` points to a non-existent location in the JSON document. +`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` +returning an error when hitting a missing `path` on `remove`. 
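+
+As an illustration, here is a minimal sketch of opting in to lenient removes
+(this assumes the `v5` module path, where the `ApplyWithOptions` and
+`NewApplyOptions` API described above is available):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch/v5"
+)
+
+func main() {
+	doc := []byte(`{"name": "John"}`)
+	// The "remove" targets /height, which does not exist in the document.
+	patchJSON := []byte(`[{"op": "remove", "path": "/height"}]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	// Start from the global defaults, then allow removes on missing paths.
+	opts := jsonpatch.NewApplyOptions()
+	opts.AllowMissingPathOnRemove = true
+
+	// With plain patch.Apply this would fail; with the option set, the
+	// remove on the missing path is ignored and the document is unchanged.
+	modified, err := patch.ApplyWithOptions(doc, opts)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("modified document: %s\n", modified)
+}
+```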
+
+When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
+that `add` operations produce all the `path` elements that are missing from the target object.
+
+Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
+whose values are populated from the global configuration variables.
+
+## Create and apply a merge patch
+Given both an original JSON document and a modified JSON document, you can create
+a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
+
+It can describe the changes needed to convert from the original to the
+modified JSON document.
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	// Let's create a merge patch from these two documents...
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	target := []byte(`{"name": "Jane", "age": 24}`)
+
+	patch, err := jsonpatch.CreateMergePatch(original, target)
+	if err != nil {
+		panic(err)
+	}
+
+	// Now let's apply the patch against a different JSON document...
+
+	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+	fmt.Printf("patch document: %s\n", patch)
+	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+patch document: {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	patchJSON := []byte(`[
+		{"op": "replace", "path": "/name", "value": "Jane"},
+		{"op": "remove", "path": "/height"}
+	]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	modified, err := patch.Apply(original)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Original document: %s\n", original)
+	fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly.
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences, and key-value ordering.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	similar := []byte(`
+		{
+			"age": 24,
+			"height": 3.21,
+			"name": "John"
+		}
+	`)
+	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+	if jsonpatch.Equal(original, similar) {
+		fmt.Println(`"original" is structurally equal to "similar"`)
+	}
+
+	if !jsonpatch.Equal(original, different) {
+		fmt.Println(`"original" is _not_ structurally equal to "different"`)
+	}
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a
+single merge patch which can describe both sets of changes.
+
+The resulting merge patch can be used such that applying it produces a
+document structurally the same as merging each merge patch into the document
+in succession.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+	// Let's combine these merge patch documents...
+	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply each patch individually against the original document
+	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+	if err != nil {
+		panic(err)
+	}
+
+	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply the combined patch against the original document
+
+	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+	if err != nil {
+		panic(err)
+	}
+
+	// Do both result in the same thing? They should!
+	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+		fmt.Println("Both JSON documents are structurally the same!")
+	}
+
+	fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the command-line program `json-patch`.
+
+This program can take multiple JSON patch documents as arguments,
+and is fed a JSON document from `stdin`. It will apply the patch(es) against
+the document and output the modified doc.
+
+**patch.1.json**
+```json
+[
+	{"op": "replace", "path": "/name", "value": "Jane"},
+	{"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+	{"op": "add", "path": "/address", "value": "123 Main St"},
+	{"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+	"name": "John",
+	"age": 24,
+	"height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+ + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 00000000000..75304b4437c --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 00000000000..ad88d40181c --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,389 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + if !mergeMerge { + pruneNulls(v) + } + + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + } + newAry = append(newAry, v) + } + + *ary = newAry + + return ary +} + +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two 
merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, ErrBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, ErrBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, ErrBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
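+// Keys that exist only in b are copied into the patch, keys that exist only
+// in a map to null (deletions), unchanged values are omitted, and nested
+// objects are diffed recursively; other changed values are replaced wholesale.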
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 00000000000..18298549076 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,788 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) 
error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. 
+func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore new file mode 100644 index 00000000000..836562412fe --- /dev/null +++ b/vendor/github.com/fatih/structs/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml new file mode 100644 index 00000000000..a08df798127 --- /dev/null +++ b/vendor/github.com/fatih/structs/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - tip +sudo: false +before_install: +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- if ! 
go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +script: +- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE new file mode 100644 index 00000000000..34504e4b3ef --- /dev/null +++ b/vendor/github.com/fatih/structs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md new file mode 100644 index 00000000000..a75eabf37bb --- /dev/null +++ b/vendor/github.com/fatih/structs/README.md @@ -0,0 +1,163 @@ +# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) + +Structs contains various utilities to work with Go (Golang) structs. It was +initially used by me to convert a struct into a `map[string]interface{}`. With +time I've added other utilities for structs. It's basically a high level +package based on primitives from the reflect package. Feel free to add new +functions or improve the existing code. + +## Install + +```bash +go get github.com/fatih/structs +``` + +## Usage and Examples + +Just like the standard lib `strings`, `bytes` and co packages, `structs` has +many global functions to manipulate or organize your struct data. 
Let's define
and declare a struct:

+```go
+type Server struct {
+	Name        string `json:"name,omitempty"`
+	ID          int
+	Enabled     bool
+	users       []string // not exported
+	http.Server          // embedded
+}
+
+server := &Server{
+	Name:    "gopher",
+	ID:      123456,
+	Enabled: true,
+}
+```
+
+```go
+// Convert a struct to a map[string]interface{}
+// => {"Name":"gopher", "ID":123456, "Enabled":true}
+m := structs.Map(server)
+
+// Convert the values of a struct to a []interface{}
+// => ["gopher", 123456, true]
+v := structs.Values(server)
+
+// Convert the names of a struct to a []string
+// (see "Names methods" for more info about fields)
+n := structs.Names(server)
+
+// Convert the values of a struct to a []*Field
+// (see "Field methods" for more info about fields)
+f := structs.Fields(server)
+
+// Return the struct name => "Server"
+n := structs.Name(server)
+
+// Check if any field of the struct is uninitialized (has a zero value).
+h := structs.HasZero(server)
+
+// Check if all fields of the struct are uninitialized (zero values).
+z := structs.IsZero(server)
+
+// Check if server is a struct or a pointer to struct
+i := structs.IsStruct(server)
+```
+
+### Struct methods
+
+The structs functions can also be used as independent methods by creating a new
+`*structs.Struct`. This is handy if you want to have more control over the
+structs (such as retrieving a single Field).
+
+```go
+// Create a new struct type:
+s := structs.New(server)
+
+m := s.Map()              // Get a map[string]interface{}
+v := s.Values()           // Get a []interface{}
+f := s.Fields()           // Get a []*Field
+n := s.Names()            // Get a []string
+f := s.Field(name)        // Get a *Field based on the given field name
+f, ok := s.FieldOk(name)  // Get a *Field based on the given field name
+n := s.Name()             // Get the struct name
+h := s.HasZero()          // Check if any field is uninitialized
+z := s.IsZero()           // Check if all fields are uninitialized
+```
+
+### Field methods
+
+We can easily examine a single Field for more detail. Below you can see how we
+get and interact with various field methods:
+
+```go
+s := structs.New(server)
+
+// Get the Field struct for the "Name" field
+name := s.Field("Name")
+
+// Get the underlying value, value => "gopher"
+value := name.Value().(string)
+
+// Set the field's value
+name.Set("another gopher")
+
+// Get the field's kind, kind => "string"
+name.Kind()
+
+// Check if the field is exported or not
+if name.IsExported() {
+	fmt.Println("Name field is exported")
+}
+
+// Check if the value is a zero value, such as "" for string, 0 for int
+if !name.IsZero() {
+	fmt.Println("Name is initialized")
+}
+
+// Check if the field is an anonymous (embedded) field
+if !name.IsEmbedded() {
+	fmt.Println("Name is not an embedded field")
+}
+
+// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
+tagValue := name.Tag("json")
+```
+
+Nested structs are supported too:
+
+```go
+addrField := s.Field("Server").Field("Addr")
+
+// Get the value for addr
+a := addrField.Value().(string)
+
+// Or get all fields
+httpServer := s.Field("Server").Fields()
+```
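+
+Map keys default to the field names, but they can be renamed or skipped via the
+`structs` field tag, which structs.go documents in detail below. A brief sketch
+(the `User` type and its fields here are illustrative, not part of the package):
+
+```go
+type User struct {
+	Login string `structs:"login"` // key will be "login" instead of "Login"
+	Token string `structs:"-"`     // ignored by this package
+}
+
+u := &User{Login: "gopher", Token: "secret"}
+m := structs.Map(u) // => {"login":"gopher"}
+```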
We can also get a slice of Fields from the Struct type to iterate over all
fields. This is handy if you wish to examine all fields:

+```go
+s := structs.New(server)
+
+for _, f := range s.Fields() {
+	fmt.Printf("field name: %+v\n", f.Name())
+
+	if f.IsExported() {
+		fmt.Printf("value   : %+v\n", f.Value())
+		fmt.Printf("is zero : %+v\n", f.IsZero())
+	}
+}
+```
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * [Cihangir Savas](https://github.com/cihangir)
+
+## License
+
+The MIT License (MIT) - see LICENSE.md for more details
diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go
new file mode 100644
index 00000000000..e69783230b4
--- /dev/null
+++ b/vendor/github.com/fatih/structs/field.go
@@ -0,0 +1,141 @@
+package structs
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+var (
+	errNotExported = errors.New("field is not exported")
+	errNotSettable = errors.New("field is not settable")
+)
+
+// Field represents a single struct field that encapsulates high level
+// functions around the field.
+type Field struct {
+	value      reflect.Value
+	field      reflect.StructField
+	defaultTag string
+}
+
+// Tag returns the value associated with key in the tag string. If there is no
+// such key in the tag, Tag returns the empty string.
+func (f *Field) Tag(key string) string {
+	return f.field.Tag.Get(key)
+}
+
+// Value returns the underlying value of the field. It panics if the field
+// is not exported.
+func (f *Field) Value() interface{} {
+	return f.value.Interface()
+}
+
+// IsEmbedded returns true if the given field is an anonymous (embedded) field.
+func (f *Field) IsEmbedded() bool {
+	return f.field.Anonymous
+}
+
+// IsExported returns true if the given field is exported.
+func (f *Field) IsExported() bool {
+	return f.field.PkgPath == ""
+}
+
+// IsZero returns true if the given field is not initialized (has a zero value).
+// It panics if the field is not exported.
+func (f *Field) IsZero() bool {
+	zero := reflect.Zero(f.value.Type()).Interface()
+	current := f.Value()
+
+	return reflect.DeepEqual(current, zero)
+}
+
+// Name returns the name of the given field.
+func (f *Field) Name() string {
+	return f.field.Name
+}
+
+// Kind returns the field's kind, such as "string", "map", "bool", etc.
+func (f *Field) Kind() reflect.Kind {
+	return f.value.Kind()
+}
+
+// Set sets the field to the given value. It returns an error if the field is
+// not settable (not addressable or not exported) or if the given value's type
+// doesn't match the field's type.
+func (f *Field) Set(val interface{}) error {
+	// we can't set unexported fields, so be sure this field is exported
+	if !f.IsExported() {
+		return errNotExported
+	}
+
+	// the field must also be settable (i.e. addressable)
+	if !f.value.CanSet() {
+		return errNotSettable
+	}
+
+	given := reflect.ValueOf(val)
+
+	if f.value.Kind() != given.Kind() {
+		return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
+	}
+
+	f.value.Set(given)
+	return nil
+}
+
+// Zero sets the field to its zero value. It returns an error if the field is not
+// settable (not addressable or not exported).
+func (f *Field) Zero() error {
+	zero := reflect.Zero(f.value.Type()).Interface()
+	return f.Set(zero)
+}
+
+// Fields returns a slice of Fields. This is particularly handy to get the
+// fields of a nested struct. A struct tag with the content of "-" ignores the
+// checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+// Field *http.Request `structs:"-"` +// +// It panics if field is not exported or if field's kind is not struct +func (f *Field) Fields() []*Field { + return getFields(f.value, f.defaultTag) +} + +// Field returns the field from a nested struct. It panics if the nested struct +// is not exported or if the field was not found. +func (f *Field) Field(name string) *Field { + field, ok := f.FieldOk(name) + if !ok { + panic("field not found") + } + + return field +} + +// FieldOk returns the field from a nested struct. The boolean returns whether +// the field was found (true) or not (false). +func (f *Field) FieldOk(name string) (*Field, bool) { + value := &f.value + // value must be settable so we need to make sure it holds the address of the + // variable and not a copy, so we can pass the pointer to strctVal instead of a + // copy (which is not assigned to any variable, hence not settable). + // see "https://blog.golang.org/laws-of-reflection#TOC_8." + if f.value.Kind() != reflect.Ptr { + a := f.value.Addr() + value = &a + } + v := strctVal(value.Interface()) + t := v.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: v.FieldByName(name), + }, true +} diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go new file mode 100644 index 00000000000..3a87706525f --- /dev/null +++ b/vendor/github.com/fatih/structs/structs.go @@ -0,0 +1,584 @@ +// Package structs contains various utilities functions to work with structs. +package structs + +import ( + "fmt" + + "reflect" +) + +var ( + // DefaultTagName is the default tag name for struct fields which provides + // a more granular to tweak certain structs. Lookup the necessary functions + // for more info. + DefaultTagName = "structs" // struct's field default tag name +) + +// Struct encapsulates a struct type to provide several high level functions +// around the struct. +type Struct struct { + raw interface{} + value reflect.Value + TagName string +} + +// New returns a new *Struct with the struct s. It panics if the s's kind is +// not struct. +func New(s interface{}) *Struct { + return &Struct{ + raw: s, + value: strctVal(s), + TagName: DefaultTagName, + } +} + +// Map converts the given struct to a map[string]interface{}, where the keys +// of the map are the field names and the values of the map the associated +// values of the fields. The default key string is the struct field name but +// can be changed in the struct field's tag value. The "structs" key in the +// struct's field tag value is the key name. Example: +// +// // Field appears in map as key "myName". +// Name string `structs:"myName"` +// +// A tag value with the content of "-" ignores that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A tag value with the content of "string" uses the stringer to get the value. Example: +// +// // The value will be output of Animal's String() func. +// // Map will panic if Animal does not implement String(). +// Field *Animal `structs:"field,string"` +// +// A tag value with the option of "flatten" used in a struct field is to flatten its fields +// in the output map. Example: +// +// // The FieldStruct's fields will be flattened into the output map. +// FieldStruct time.Time `structs:",flatten"` +// +// A tag value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. 
+// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field if +// the field value is empty. Example: +// +// // Field appears in map as key "myName", but the field is +// // skipped if empty. +// Field string `structs:"myName,omitempty"` +// +// // Field appears in map as key "Field" (the default), but +// // the field is skipped if empty. +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. +func (s *Struct) Map() map[string]interface{} { + out := make(map[string]interface{}) + s.FillMap(out) + return out +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func (s *Struct) FillMap(out map[string]interface{}) { + if out == nil { + return + } + + fields := s.structFields() + + for _, field := range fields { + name := field.Name + val := s.value.FieldByName(name) + isSubStruct := false + var finalVal interface{} + + tagName, tagOpts := parseTag(field.Tag.Get(s.TagName)) + if tagName != "" { + name = tagName + } + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if !tagOpts.Has("omitnested") { + finalVal = s.nested(val) + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Map, reflect.Struct: + isSubStruct = true + } + } else { + finalVal = val.Interface() + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + out[name] = s.String() + } + continue + } + + if isSubStruct && (tagOpts.Has("flatten")) { + for k := range finalVal.(map[string]interface{}) { + out[k] = finalVal.(map[string]interface{})[k] + } + } else { + out[name] = finalVal + } + } +} + +// Values converts the given s struct's field values to a []interface{}. A +// struct tag with the content of "-" ignores the that particular field. +// Example: +// +// // Field is ignored by this package. +// Field int `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Fields is not processed further by this package. +// Field time.Time `structs:",omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field and +// is not added to the values if the field value is empty. Example: +// +// // Field is skipped if empty +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. 
+func (s *Struct) Values() []interface{} { + fields := s.structFields() + + var t []interface{} + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + t = append(t, s.String()) + } + continue + } + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + // look out for embedded structs, and convert them to a + // []interface{} to be added to the final values slice + t = append(t, Values(val.Interface())...) + } else { + t = append(t, val.Interface()) + } + } + + return t +} + +// Fields returns a slice of Fields. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Fields() []*Field { + return getFields(s.value, s.TagName) +} + +// Names returns a slice of field names. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Names() []string { + fields := getFields(s.value, s.TagName) + + names := make([]string, len(fields)) + + for i, field := range fields { + names[i] = field.Name() + } + + return names +} + +func getFields(v reflect.Value, tagName string) []*Field { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + t := v.Type() + + var fields []*Field + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if tag := field.Tag.Get(tagName); tag == "-" { + continue + } + + f := &Field{ + field: field, + value: v.FieldByName(field.Name), + } + + fields = append(fields, f) + + } + + return fields +} + +// Field returns a new Field struct that provides several high level functions +// around a single struct field entity. It panics if the field is not found. +func (s *Struct) Field(name string) *Field { + f, ok := s.FieldOk(name) + if !ok { + panic("field not found") + } + + return f +} + +// FieldOk returns a new Field struct that provides several high level functions +// around a single struct field entity. The boolean returns true if the field +// was found. +func (s *Struct) FieldOk(name string) (*Field, bool) { + t := s.value.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: s.value.FieldByName(name), + defaultTag: s.TagName, + }, true +} + +// IsZero returns true if all fields in a struct is a zero value (not +// initialized) A struct tag with the content of "-" ignores the checking of +// that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. 
+func (s *Struct) IsZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := IsZero(val.Interface()) + if !ok { + return false + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if !reflect.DeepEqual(current, zero) { + return false + } + } + + return true +} + +// HasZero returns true if a field in a struct is not initialized (zero value). +// A struct tag with the content of "-" ignores the checking of that particular +// field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. +func (s *Struct) HasZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := HasZero(val.Interface()) + if ok { + return true + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + return true + } + } + + return false +} + +// Name returns the structs's type name within its package. For more info refer +// to Name() function. +func (s *Struct) Name() string { + return s.value.Type().Name() +} + +// structFields returns the exported struct fields for a given s struct. This +// is a convenient helper method to avoid duplicate code in some of the +// functions. +func (s *Struct) structFields() []reflect.StructField { + t := s.value.Type() + + var f []reflect.StructField + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // we can't access the value of unexported fields + if field.PkgPath != "" { + continue + } + + // don't check if it's omitted + if tag := field.Tag.Get(s.TagName); tag == "-" { + continue + } + + f = append(f, field) + } + + return f +} + +func strctVal(s interface{}) reflect.Value { + v := reflect.ValueOf(s) + + // if pointer get the underlying element≤ + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + if v.Kind() != reflect.Struct { + panic("not struct") + } + + return v +} + +// Map converts the given struct to a map[string]interface{}. For more info +// refer to Struct types Map() method. It panics if s's kind is not struct. +func Map(s interface{}) map[string]interface{} { + return New(s).Map() +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func FillMap(s interface{}, out map[string]interface{}) { + New(s).FillMap(out) +} + +// Values converts the given struct to a []interface{}. For more info refer to +// Struct types Values() method. It panics if s's kind is not struct. 
+func Values(s interface{}) []interface{} { + return New(s).Values() +} + +// Fields returns a slice of *Field. For more info refer to Struct types +// Fields() method. It panics if s's kind is not struct. +func Fields(s interface{}) []*Field { + return New(s).Fields() +} + +// Names returns a slice of field names. For more info refer to Struct types +// Names() method. It panics if s's kind is not struct. +func Names(s interface{}) []string { + return New(s).Names() +} + +// IsZero returns true if all fields is equal to a zero value. For more info +// refer to Struct types IsZero() method. It panics if s's kind is not struct. +func IsZero(s interface{}) bool { + return New(s).IsZero() +} + +// HasZero returns true if any field is equal to a zero value. For more info +// refer to Struct types HasZero() method. It panics if s's kind is not struct. +func HasZero(s interface{}) bool { + return New(s).HasZero() +} + +// IsStruct returns true if the given variable is a struct or a pointer to +// struct. +func IsStruct(s interface{}) bool { + v := reflect.ValueOf(s) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // uninitialized zero value of a struct + if v.Kind() == reflect.Invalid { + return false + } + + return v.Kind() == reflect.Struct +} + +// Name returns the structs's type name within its package. It returns an +// empty string for unnamed types. It panics if s's kind is not struct. +func Name(s interface{}) string { + return New(s).Name() +} + +// nested retrieves recursively all types for the given value and returns the +// nested value. +func (s *Struct) nested(val reflect.Value) interface{} { + var finalVal interface{} + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + n := New(val.Interface()) + n.TagName = s.TagName + m := n.Map() + + // do not add the converted value if there are no exported fields, ie: + // time.Time + if len(m) == 0 { + finalVal = val.Interface() + } else { + finalVal = m + } + case reflect.Map: + // get the element type of the map + mapElem := val.Type() + switch val.Type().Kind() { + case reflect.Ptr, reflect.Array, reflect.Map, + reflect.Slice, reflect.Chan: + mapElem = val.Type().Elem() + if mapElem.Kind() == reflect.Ptr { + mapElem = mapElem.Elem() + } + } + + // only iterate over struct types, ie: map[string]StructType, + // map[string][]StructType, + if mapElem.Kind() == reflect.Struct || + (mapElem.Kind() == reflect.Slice && + mapElem.Elem().Kind() == reflect.Struct) { + m := make(map[string]interface{}, val.Len()) + for _, k := range val.MapKeys() { + m[k.String()] = s.nested(val.MapIndex(k)) + } + finalVal = m + break + } + + // TODO(arslan): should this be optional? + finalVal = val.Interface() + case reflect.Slice, reflect.Array: + if val.Type().Kind() == reflect.Interface { + finalVal = val.Interface() + break + } + + // TODO(arslan): should this be optional? + // do not iterate of non struct types, just pass the value. Ie: []int, + // []string, co... We only iterate further if it's a struct. 
+		// i.e. []foo or []*foo
+		if val.Type().Elem().Kind() != reflect.Struct &&
+			!(val.Type().Elem().Kind() == reflect.Ptr &&
+				val.Type().Elem().Elem().Kind() == reflect.Struct) {
+			finalVal = val.Interface()
+			break
+		}
+
+		slices := make([]interface{}, val.Len())
+		for x := 0; x < val.Len(); x++ {
+			slices[x] = s.nested(val.Index(x))
+		}
+		finalVal = slices
+	default:
+		finalVal = val.Interface()
+	}
+
+	return finalVal
+}
diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go
new file mode 100644
index 00000000000..136a31eba9a
--- /dev/null
+++ b/vendor/github.com/fatih/structs/tags.go
@@ -0,0 +1,32 @@
+package structs
+
+import "strings"
+
+// tagOptions contains a slice of tag options
+type tagOptions []string
+
+// Has returns true if the given option is available in tagOptions
+func (t tagOptions) Has(opt string) bool {
+	for _, tagOpt := range t {
+		if tagOpt == opt {
+			return true
+		}
+	}
+
+	return false
+}
+
+// parseTag splits a struct field's tag into its name and a list of options
+// which come after the name. A tag is in the form of: "name,option1,option2".
+// The name can be neglected.
+func parseTag(tag string) (string, tagOptions) {
+	// tag is one of the following:
+	// ""
+	// "name"
+	// "name,opt"
+	// "name,opt,opt2"
+	// ",opt"
+
+	res := strings.Split(tag, ",")
+	return res[0], res[1:]
+}
diff --git a/vendor/github.com/felixge/fgprof/LICENSE.txt b/vendor/github.com/felixge/fgprof/LICENSE.txt
new file mode 100644
index 00000000000..3e424911bdb
--- /dev/null
+++ b/vendor/github.com/felixge/fgprof/LICENSE.txt
@@ -0,0 +1,8 @@
+The MIT License (MIT)
+Copyright © 2020 Felix Geisendörfer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/felixge/fgprof/README.md b/vendor/github.com/felixge/fgprof/README.md
new file mode 100644
index 00000000000..fe0c0a25d30
--- /dev/null
+++ b/vendor/github.com/felixge/fgprof/README.md
@@ -0,0 +1,214 @@
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go)](https://pkg.go.dev/github.com/felixge/fgprof)
+![GitHub Workflow Status](https://img.shields.io/github/workflow/status/felixge/fgprof/Go)
+![GitHub](https://img.shields.io/github/license/felixge/fgprof)
+
+# :rocket: fgprof - The Full Go Profiler
+
+fgprof is a sampling [Go](https://golang.org/) profiler that allows you to analyze On-CPU as well as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O) time together.
+
+Go's builtin sampling CPU profiler can only show On-CPU time, but it's better than fgprof at that. Go also includes tracing profilers that can analyze I/O, but they can't be combined with the CPU profiler.
+
+fgprof is designed for analyzing applications with mixed I/O and CPU workloads.
+
+## Quick Start
+
+If this is the first time you've heard about fgprof, you should start by reading
+about [The Problem](#the-problem) & [How it Works](#how-it-works).
+
+There is no need to choose between fgprof and the builtin profiler. Here is how to add both to your application:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+	_ "net/http/pprof"
+
+	"github.com/felixge/fgprof"
+)
+
+func main() {
+	http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
+	go func() {
+		log.Println(http.ListenAndServe(":6060", nil))
+	}()
+
+	// <code to profile>
+}
+```
+
+fgprof is compatible with the `go tool pprof` visualizer, so taking and analyzing a 3s profile is as simple as:
+
+```
+go tool pprof --http=:6061 http://localhost:6060/debug/fgprof?seconds=3
+```
+
+![](./assets/fgprof_pprof.png)
+
+Additionally, fgprof supports the plain text format used by Brendan Gregg's [FlameGraph](http://www.brendangregg.com/flamegraphs.html) utility:
+
+```
+git clone https://github.com/brendangregg/FlameGraph
+cd FlameGraph
+curl -s 'localhost:6060/debug/fgprof?seconds=3&format=folded' > fgprof.folded
+./flamegraph.pl fgprof.folded > fgprof.svg
+```
+
+![](./assets/fgprof_gregg.png)
+
+Which tool you prefer is up to you, but one thing I like about Gregg's tool is that you can filter the plaintext files using grep, which can be very useful when analyzing large programs.
+
+If you don't have a program to profile right now, you can `go run ./example`, which should allow you to reproduce the graphs you see above. If you've never seen such graphs before, and are unsure how to read them, head over to Brendan Gregg's [Flame Graph](http://www.brendangregg.com/flamegraphs.html) page.
+
+## The Problem
+
+Let's say you've been tasked with optimizing a simple program that has a loop calling out to three functions:
+
+```go
+func main() {
+	for {
+		// Http request to a web service that might be slow.
+		slowNetworkRequest()
+		// Some heavy CPU computation.
+		cpuIntensiveTask()
+		// Poorly named function that you don't understand yet.
+		weirdFunction()
+	}
+}
+```
+
+One way to decide which of these three functions you should focus your attention on would be to wrap each function call like this:
+
+```go
+start := time.Now()
+slowNetworkRequest()
+fmt.Printf("slowNetworkRequest: %s\n", time.Since(start))
+// ...
+```
+
+However, this can be very tedious for large programs. You'll also have to figure out how to average the numbers in case they fluctuate. And once you've done that, you'll have to repeat the process for the functions called by the function you decide to focus on.
+
+### /debug/pprof/profile
+
+So, this seems like a perfect use case for a profiler. Let's try the `/debug/pprof/profile` endpoint of the builtin `net/http/pprof` package to analyze our program for 10s:
+
+```go
+import _ "net/http/pprof"
+
+func main() {
+	go func() {
+		log.Println(http.ListenAndServe(":6060", nil))
+	}()
+
+	// <code to profile>
+}
+```
+
+```
+go tool pprof -http=:6061 http://localhost:6060/debug/pprof/profile?seconds=10
+```
+
+That was easy! Looks like we're spending all our time in `cpuIntensiveTask()`, so let's focus on that?
+ +![](./assets/pprof_cpu.png) + +But before we get carried away, let's quickly double-check this assumption by manually timing our function calls with `time.Since()` as described above: + +``` +slowNetworkRequest: 66.815041ms +cpuIntensiveTask: 30.000672ms +weirdFunction: 10.64764ms +slowNetworkRequest: 67.194516ms +cpuIntensiveTask: 30.000912ms +weirdFunction: 10.105371ms +// ... +``` + +Oh no, the builtin CPU profiler is misleading us! How is that possible? Well, it turns out the builtin profiler only shows On-CPU time. Time spent waiting on I/O is completely hidden from us. + +### /debug/pprof/trace + +Let's try something else. The `/debug/pprof/trace` endpoint includes a "synchronization blocking profile", maybe that's what we need? + +``` +curl -so pprof.trace http://localhost:6060/debug/pprof/trace?seconds=10 +go tool trace --pprof=sync pprof.trace > sync.pprof +go tool pprof --http=:6061 sync.pprof +``` + +Oh no, we're being misled again. This profiler thinks all our time is spent on `slowNetworkRequest()`. It's completely missing `cpuIntensiveTask()`. And what about `weirdFunction()`? It seems like no builtin profiler can see it? + +![](./assets/pprof_trace.png) + +### /debug/fgprof + +So what can we do? Let's try fgprof, which is designed to analyze mixed I/O and CPU workloads like the one we're dealing with here. We can easily add it alongside the builtin profilers. + +```go +import ( + "log" + "net/http" + _ "net/http/pprof" + + "github.com/felixge/fgprof" +) + +func main() { + http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler()) + go func() { + log.Println(http.ListenAndServe(":6060", nil)) + }() + + // +} +``` + + + +``` +go tool pprof --http=:6061 http://localhost:6060/debug/fgprof?seconds=10 +``` + +Finally, a profile that shows all three of our functions and how much time we're spending on them. It also turns out our `weirdFunction()` was simply calling `time.Sleep()`, how weird indeed! + +![](./assets/fgprof_pprof.png) + +## How it Works + +### fgprof + +fgprof is implemented as a background goroutine that wakes up 99 times per second and calls `runtime.GoroutineProfile`. This returns a list of all goroutines and their call stacks, regardless of their current On/Off-CPU scheduling status. + +This data is used to maintain an in-memory stack counter which can be converted to the pprof or folded output format. The meat of the implementation is super simple and < 100 lines of code; you should [check it out](./fgprof.go). + +Generally speaking, fgprof should not have a big impact on the performance of your program. However, `runtime.GoroutineProfile` calls `stopTheWorld()` and could be slow if you have a lot of goroutines. For now the advice is to test the impact of the profiler in a development environment before running it against production instances. In the future this README will try to provide a more detailed analysis of the performance impact. + +### Go's builtin CPU Profiler + +The builtin Go CPU profiler uses the [setitimer(2)](https://linux.die.net/man/2/setitimer) system call to ask the operating system to send it a `SIGPROF` signal 100 times a second. Each signal stops the Go process and gets delivered to a random thread's `sigtrampgo()` function. This function then proceeds to call `sigprof()` or `sigprofNonGo()` to record the thread's current stack. + +Since Go uses non-blocking I/O, goroutines that wait on I/O are parked and not running on any threads. Therefore they end up being largely invisible to Go's builtin CPU profiler.
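+To make the sampling approach described above concrete, here is a minimal, self-contained sketch of the idea. It is not fgprof's actual implementation (see [fgprof.go](./fgprof.go) for that); it just shows the core loop: wake up at a fixed rate, snapshot all goroutine stacks via `runtime.GoroutineProfile`, and fold them into a counter:
+
+```go
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// sample takes one snapshot of every goroutine's stack and increments a
+// counter keyed by the ";"-joined frame names, outermost frame first.
+func sample(counts map[string]int) {
+	n, _ := runtime.GoroutineProfile(nil)
+	records := make([]runtime.StackRecord, n+10) // headroom: goroutines may appear meanwhile
+	n, ok := runtime.GoroutineProfile(records)
+	if !ok {
+		return // profile grew between the two calls; skip this sample
+	}
+	for _, rec := range records[:n] {
+		frames := runtime.CallersFrames(rec.Stack())
+		var stack []string
+		for {
+			frame, more := frames.Next()
+			if frame.Function != "" {
+				stack = append([]string{frame.Function}, stack...)
+			}
+			if !more {
+				break
+			}
+		}
+		counts[strings.Join(stack, ";")]++
+	}
+}
+
+func main() {
+	counts := map[string]int{}
+	ticker := time.NewTicker(time.Second / 99) // 99 Hz, like fgprof
+	defer ticker.Stop()
+	deadline := time.After(3 * time.Second)
+	for {
+		select {
+		case <-ticker.C:
+			sample(counts)
+		case <-deadline:
+			for stack, n := range counts {
+				fmt.Printf("%s %d\n", stack, n) // folded format: "main.main;main.foo 42"
+			}
+			return
+		}
+	}
+}
+```
+
+Because `runtime.GoroutineProfile` sees parked goroutines too, waiting time shows up in the counts, which is exactly why Off-CPU time is visible in fgprof's output while the `SIGPROF`-based profiler misses it.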
+ +## The Future of Go Profiling + +There is a great proposal for [hardware performance counters for CPU profiling](https://go.googlesource.com/proposal/+/refs/changes/08/219508/2/design/36821-perf-counter-pprof.md#5-empirical-evidence-on-the-accuracy-and-precision-of-pmu-profiles) in Go. The proposal is aimed at making the builtin CPU Profiler even more accurate, especially under highly parallel workloads on many CPUs. It also includes a very in-depth analysis of the current profiler. Based on the design, I think the proposed profiler would also be blind to I/O workloads, but still seems appealing for CPU-based workloads. + +As far as fgprof itself is concerned, I might implement streaming output, leaving the final aggregation to other tools. This would open the door to even more advanced analysis, perhaps by integrating with tools such as [flamescope](https://github.com/Netflix/flamescope). + +Additionally, I'm open to the idea of contributing fgprof to the Go project itself. I've [floated the idea](https://groups.google.com/g/golang-dev/c/LCJyvL90xv8) on the golang-dev mailing list, so let's see what happens. + + +## Known Issues + +There is no perfect approach to profiling, and fgprof is no exception. Below is a list of known issues that will hopefully not be of practical concern for most users, but are important to highlight. + +- fgprof can't catch goroutines while they are running in loops without function calls, only when they get asynchronously preempted. This can lead to reporting inaccuracies. Use the builtin CPU profiler if this is a problem for you. +- fgprof may not work in Go 1.13 if another goroutine is in a loop without function calls the whole time. Async preemption in Go 1.14 should mostly fix this issue. +- Internal C functions do not show up in the stack traces, e.g. `runtime.nanotime` which is called by `time.Since` in the example program. +- The current implementation relies on the Go scheduler to schedule the internal goroutine at a fixed sample rate. Scheduler delays, especially biased ones, might cause inaccuracies. + +## Credits + +The following articles helped me learn more about how profilers in general, and the Go profiler in particular, work. + +- [How do Ruby & Python profilers work?](https://jvns.ca/blog/2017/12/17/how-do-ruby---python-profilers-work-/) by Julia Evans +- [Profiling Go programs with pprof](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/) by Julia Evans + +## License + +fgprof is licensed under the MIT License. diff --git a/vendor/github.com/felixge/fgprof/fgprof.go b/vendor/github.com/felixge/fgprof/fgprof.go new file mode 100644 index 00000000000..dba16161ec3 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/fgprof.go @@ -0,0 +1,97 @@ +// fgprof is a sampling Go profiler that allows you to analyze On-CPU as well +// as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O) +// time together. +package fgprof + +import ( + "io" + "runtime" + "strings" + "time" +) + +// Start begins profiling the goroutines of the program and returns a function +// that needs to be invoked by the caller to stop the profiling and write the +// results to w using the given format. +func Start(w io.Writer, format Format) func() error { + // Go's CPU profiler uses 100hz, but 99hz might be less likely to result in + // accidental synchronization with the program we're profiling.
+ const hz = 99 + ticker := time.NewTicker(time.Second / hz) + stopCh := make(chan struct{}) + + stackCounts := stackCounter{} + go func() { + defer ticker.Stop() + + for { + select { + case <-ticker.C: + stackCounts.Update() + case <-stopCh: + return + } + } + }() + + return func() error { + stopCh <- struct{}{} + return writeFormat(w, stackCounts, format, hz) + } +} + +type stackCounter map[string]int + +func (s stackCounter) Update() { + // Determine the runtime.Frame of this func so we can hide it from our + // profiling output. + rpc := make([]uintptr, 1) + n := runtime.Callers(1, rpc) + if n < 1 { + panic("could not determine selfFrame") + } + selfFrame, _ := runtime.CallersFrames(rpc).Next() + + // COPYRIGHT: The code for populating `p` below is copied from + // writeRuntimeProfile in src/runtime/pprof/pprof.go. + // + // Find out how many records there are (GoroutineProfile(nil)), + // allocate that many records, and get the data. + // There's a race—more records might be added between + // the two calls—so allocate a few extra records for safety + // and also try again if we're very unlucky. + // The loop should only execute one iteration in the common case. + var p []runtime.StackRecord + n, ok := runtime.GoroutineProfile(nil) + for { + // Allocate room for a slightly bigger profile, + // in case a few more entries have been added + // since the call to ThreadProfile. + p = make([]runtime.StackRecord, n+10) + n, ok = runtime.GoroutineProfile(p) + if ok { + p = p[0:n] + break + } + // Profile grew; try again. + } + +outer: + for _, pp := range p { + frames := runtime.CallersFrames(pp.Stack()) + + var stack []string + for { + frame, more := frames.Next() + if !more { + break + } else if frame.Entry == selfFrame.Entry { + continue outer + } + + stack = append([]string{frame.Function}, stack...) + } + key := strings.Join(stack, ";") + s[key]++ + } +} diff --git a/vendor/github.com/felixge/fgprof/format.go b/vendor/github.com/felixge/fgprof/format.go new file mode 100644 index 00000000000..1a351e39c2b --- /dev/null +++ b/vendor/github.com/felixge/fgprof/format.go @@ -0,0 +1,102 @@ +package fgprof + +import ( + "fmt" + "io" + "sort" + "strings" + + "github.com/google/pprof/profile" +) + +type Format string + +const ( + // FormatFolded is used by Brendan Gregg's FlameGraph utility, see + // https://github.com/brendangregg/FlameGraph#2-fold-stacks. + FormatFolded Format = "folded" + // FormatPprof is used by Google's pprof utility, see + // https://github.com/google/pprof/blob/master/proto/README.md. 
+ FormatPprof Format = "pprof" +) + +func writeFormat(w io.Writer, s stackCounter, f Format, hz int) error { + switch f { + case FormatFolded: + return writeFolded(w, s) + case FormatPprof: + return toPprof(s, hz).Write(w) + default: + return fmt.Errorf("unknown format: %q", f) + } +} + +func writeFolded(w io.Writer, s stackCounter) error { + for _, stack := range sortedKeys(s) { + count := s[stack] + if _, err := fmt.Fprintf(w, "%s %d\n", stack, count); err != nil { + return err + } + } + return nil +} + +func toPprof(s stackCounter, hz int) *profile.Profile { + functionID := uint64(1) + locationID := uint64(1) + line := int64(1) + + p := &profile.Profile{} + m := &profile.Mapping{ID: 1, HasFunctions: true} + p.Mapping = []*profile.Mapping{m} + p.SampleType = []*profile.ValueType{ + { + Type: "samples", + Unit: "count", + }, + { + Type: "time", + Unit: "nanoseconds", + }, + } + + for stack, count := range s { + sample := &profile.Sample{ + Value: []int64{ + int64(count), + int64(1000 * 1000 * 1000 / hz * count), + }, + } + for _, fnName := range strings.Split(stack, ";") { + function := &profile.Function{ + ID: functionID, + Name: fnName, + } + p.Function = append(p.Function, function) + + location := &profile.Location{ + ID: locationID, + Mapping: m, + Line: []profile.Line{{Function: function}}, + } + p.Location = append(p.Location, location) + sample.Location = append([]*profile.Location{location}, sample.Location...) + + line++ + + locationID++ + functionID++ + } + p.Sample = append(p.Sample, sample) + } + return p +} + +func sortedKeys(s stackCounter) []string { + var keys []string + for stack := range s { + keys = append(keys, stack) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/felixge/fgprof/go.mod b/vendor/github.com/felixge/fgprof/go.mod new file mode 100644 index 00000000000..b2ce6fd982c --- /dev/null +++ b/vendor/github.com/felixge/fgprof/go.mod @@ -0,0 +1,5 @@ +module github.com/felixge/fgprof + +go 1.14 + +require github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040 diff --git a/vendor/github.com/felixge/fgprof/go.sum b/vendor/github.com/felixge/fgprof/go.sum new file mode 100644 index 00000000000..07ac5705c59 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/go.sum @@ -0,0 +1,7 @@ +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040 h1:i7RUpu0EybzQyQvPT7J3MmODs4+gPcHsD/pqW0uIYVo= +github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/felixge/fgprof/handler.go b/vendor/github.com/felixge/fgprof/handler.go new file mode 100644 index 00000000000..a25cdc695e8 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/handler.go @@ -0,0 +1,32 @@ +package fgprof + +import ( + "fmt" + "net/http" + "time" +) + +// Handler returns an http handler that takes an optional "seconds" query +// argument that defaults to "30" and produces a profile over this duration. 
+// The optional "format" parameter controls if the output is written in +// Google's "pprof" format (default) or Brendan Gregg's "folded" stack format. +func Handler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var seconds int + if s := r.URL.Query().Get("seconds"); s == "" { + seconds = 30 + } else if _, err := fmt.Sscanf(s, "%d", &seconds); err != nil || seconds <= 0 { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "bad seconds: %d: %s\n", seconds, err) + return // don't fall through and profile with an invalid duration + } + + format := Format(r.URL.Query().Get("format")) + if format == "" { + format = FormatPprof + } + + stop := Start(w, format) + defer stop() + time.Sleep(time.Duration(seconds) * time.Second) + }) +} diff --git a/vendor/github.com/felixge/fgprof/pprof.go b/vendor/github.com/felixge/fgprof/pprof.go new file mode 100644 index 00000000000..f0908e8e01b --- /dev/null +++ b/vendor/github.com/felixge/fgprof/pprof.go @@ -0,0 +1,56 @@ +package fgprof + +import ( + "strings" + + "github.com/google/pprof/profile" +) + +func toProfile(s stackCounter, hz int) *profile.Profile { + functionID := uint64(1) + locationID := uint64(1) + + p := &profile.Profile{} + m := &profile.Mapping{ID: 1, HasFunctions: true} + p.Mapping = []*profile.Mapping{m} + p.SampleType = []*profile.ValueType{ + { + Type: "samples", + Unit: "count", + }, + { + Type: "time", + Unit: "nanoseconds", + }, + } + + for _, stack := range sortedKeys(s) { + count := s[stack] + sample := &profile.Sample{ + Value: []int64{ + int64(count), + int64(1000 * 1000 * 1000 / hz * count), + }, + } + for _, fnName := range strings.Split(stack, ";") { + function := &profile.Function{ + ID: functionID, + Name: fnName, + } + p.Function = append(p.Function, function) + + location := &profile.Location{ + ID: locationID, + Mapping: m, + Line: []profile.Line{{Function: function}}, + } + p.Location = append(p.Location, location) + sample.Location = append(sample.Location, location) + + locationID++ + functionID++ + } + p.Sample = append(p.Sample, sample) + } + return p +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/.gitignore b/vendor/github.com/form3tech-oss/jwt-go/.gitignore new file mode 100644 index 00000000000..c0e81a8d926 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +bin +.idea/ + + diff --git a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml new file mode 100644 index 00000000000..3c7fb7e1ae6 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml @@ -0,0 +1,12 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - 1.12 + - 1.13 + - 1.14 + - 1.15 + - tip diff --git a/vendor/github.com/form3tech-oss/jwt-go/LICENSE b/vendor/github.com/form3tech-oss/jwt-go/LICENSE new file mode 100644 index 00000000000..df83a9c2f01 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 00000000000..7fc1f793cbc --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except that you must type-assert the claims property. + +The old example for parsing a token looked like this: + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to: + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
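+For instance, a minimal custom `Extractor` can be sketched in a few lines. This is a hedged sketch: it assumes the `request` subpackage's interface method is `ExtractToken(*http.Request) (string, error)` and that `ErrNoTokenInRequest` is its "nothing found" sentinel, so check the package docs before relying on either name:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/form3tech-oss/jwt-go/request"
+)
+
+// cookieExtractor picks the raw token string out of a named cookie,
+// complementing the header/argument extractors shipped with the package.
+type cookieExtractor struct{ name string }
+
+func (c cookieExtractor) ExtractToken(r *http.Request) (string, error) {
+	cookie, err := r.Cookie(c.name)
+	if err != nil {
+		// Assumed sentinel from the request subpackage.
+		return "", request.ErrNoTokenInRequest
+	}
+	return cookie.Value, nil
+}
+
+func main() {
+	req, _ := http.NewRequest("GET", "http://example.com/", nil)
+	req.AddCookie(&http.Cookie{Name: "jwt", Value: "x.y.z"})
+	tok, _ := cookieExtractor{name: "jwt"}.ExtractToken(req)
+	fmt.Println(tok) // "x.y.z"
+}
+```
+
+A value like this can be passed to `request.ParseFromRequest` anywhere an `Extractor` is expected, or combined with the other extractors listed below.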
+ +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument. +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header. + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/form3tech-oss/jwt-go/README.md b/vendor/github.com/form3tech-oss/jwt-go/README.md new file mode 100644 index 00000000000..d7749077fde --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/README.md @@ -0,0 +1,104 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since version 3.0.0 was released in 2016. I'm now working on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly, and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time.
If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org).
Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing with respect to semantic versioning. + +**BREAKING CHANGES:** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+ +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md b/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md new file mode 100644 index 00000000000..6370298313a --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
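+As a sketch of that change (the names `hmacSecret` and `myKeyFunc` are illustrative, not part of the library):
+
+```go
+package main
+
+import jwt "github.com/form3tech-oss/jwt-go"
+
+var hmacSecret = []byte("illustrative-secret")
+
+// Before 2.0.0 the lookup callback returned []byte:
+//
+//	func myKeyFunc(t *jwt.Token) ([]byte, error) { return hmacSecret, nil }
+//
+// From 2.0.0 on it returns interface{}, so RSA keys and other types fit too:
+func myKeyFunc(t *jwt.Token) (interface{}, error) {
+	return hmacSecret, nil
+}
+
+func main() {
+	_, _ = jwt.Parse("header.claims.signature", myKeyFunc)
+}
+```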
+ +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/form3tech-oss/jwt-go/claims.go b/vendor/github.com/form3tech-oss/jwt-go/claims.go new file mode 100644 index 00000000000..624890666c6 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/claims.go @@ -0,0 +1,136 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience []string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time-based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// Likewise, if any of the above claims are absent from the token, the +// token is still considered valid. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them.
+ if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + return true + } + } + return false +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/doc.go b/vendor/github.com/form3tech-oss/jwt-go/doc.go new file mode 100644 index 00000000000..a86dc1a3b34 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go b/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go new file mode 100644 index 00000000000..f977381240e --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an *ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an *ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to 
make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000000..db9f4be7d8e --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKIX public key or certificate +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/errors.go b/vendor/github.com/form3tech-oss/jwt-go/errors.go new file mode 100644 index 00000000000..1c93024aad2 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper 
for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/hmac.go b/vendor/github.com/form3tech-oss/jwt-go/hmac.go new file mode 100644 index 00000000000..addbe5d4018 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go new file mode 100644 index 00000000000..bcc37b15bf8 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go @@ -0,0 +1,110 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + case string: + aud = append(aud, v) + default: + return false + } + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time-based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// Likewise, if any of the above claims are absent from the token, the +// token is still considered valid. 
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/none.go b/vendor/github.com/form3tech-oss/jwt-go/none.go new file mode 100644 index 00000000000..f04d189d067 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/parser.go b/vendor/github.com/form3tech-oss/jwt-go/parser.go new file mode 100644 index 00000000000..d6901d9adb5 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
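+//
+// An illustrative sketch (tokenString is assumed to exist): peeking at the
+// unverified header to choose a verification key before a real parse:
+//
+//	tok, _, err := new(Parser).ParseUnverified(tokenString, MapClaims{})
+//	if err == nil {
+//		kid, _ := tok.Header["kid"].(string) // untrusted until verified
+//	}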
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa.go b/vendor/github.com/form3tech-oss/jwt-go/rsa.go new file mode 100644 index 00000000000..e4caf1ca4a1 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. 
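+//
+// Sketch (signingString, signature and pubKey are assumed to exist and to
+// come from a decoded compact JWT plus a parsed RSA public key):
+//
+//	err := SigningMethodRS256.Verify(signingString, signature, pubKey)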
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go new file mode 100644 index 00000000000..c0147086480 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go @@ -0,0 +1,142 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. 
+var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go new file mode 100644 index 00000000000..14c78c292a9 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func 
ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/signing_method.go b/vendor/github.com/form3tech-oss/jwt-go/signing_method.go new file mode 100644 index 00000000000..ed1f212b21e --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/token.go b/vendor/github.com/form3tech-oss/jwt-go/token.go new file mode 100644 index 00000000000..d637e0867c6 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 00000000000..fad895851e5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 00000000000..32f1001be0a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 00000000000..4cd0cbaf432 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 00000000000..a9c30165cdd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... + +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 00000000000..5ab5d41c547 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. 
+ +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 00000000000..be4d7ea2c14 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 
2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. 
+ * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. + +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] 
kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 00000000000..828a60b24ba --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. 
+* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 00000000000..e180c8fb059 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 00000000000..b2629e5229c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,130 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. 
+ +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
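+
+As the answer above notes, watches are not recursive; until [#18][] lands you
+can approximate recursion by walking the tree yourself. A minimal sketch
+(assumes an existing `watcher` and a `root` path; error handling elided):
+
+```go
+filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+	if err == nil && info.IsDir() {
+		return watcher.Add(path)
+	}
+	return err
+})
+```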
+ +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000000..ced39cb881e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 00000000000..89cab046d12 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." 
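+//
+// Note that Op is a bitmask, so callers usually test for membership rather
+// than equality, e.g. (illustrative):
+//
+//	if event.Op&fsnotify.Write == fsnotify.Write { ... }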
+func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 00000000000..ff11e13f224 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 00000000000..f60af9855da --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 00000000000..d9fd1b88a05 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
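+// Adding a path that is already watched merges the new flags into the
+// existing watch (via IN_MASK_ADD below) instead of replacing it.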
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. 
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000000..b33f2b4d4b7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 00000000000..86e76a3d676 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Tracks whether we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // Copy paths to remove while locked.
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // Unlock before calling Remove, which also locks.
+
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // Send a "quit" message to the reader goroutine.
+ close(w.done)
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths under this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE).
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent.
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may differ from the one passed in the case of symlinks.
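+// For directories watched with NOTE_WRITE, the files inside are additionally
+// watched one by one, so that per-file Create events can be synthesized (see
+// watchDirectoryFiles and sendDirectoryChangeEvents below).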
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when
+ // we do an rm -fr on a recursively watched folder and receive a
+ // modification event first, even though the folder has already been
+ // deleted; the delete event arrives later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ // Mark it as a remove event.
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // Make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing; ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event.
+ kevents = kevents[1:]
+ }
+ }
+
+ // Cleanup.
+ err := unix.Close(w.kq)
+ if err != nil {
+ // The only way the previous loop breaks is if w.done was closed, so we
+ // need a non-blocking send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files.
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify, which provides a
+// create event for files created in a watched directory.
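+// Files already recorded in the fileExists map are skipped, so only genuinely
+// new files produce Create events (see sendFileCreatedEventIfNew).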
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files.
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Search for new files.
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event.
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
+ }
+
+ // Like watchDirectoryFiles (but without doing another ReadDir).
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // Mimic Linux providing delete events for subdirectories,
+ // but preserve the flags used if currently watching the subdirectory.
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // Watch file to mimic Linux inotify.
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue.
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // Register the events.
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value.
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 00000000000..2306c4620bf
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 00000000000..870c4d6d184
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 00000000000..09436f31d82
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine.
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
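+// As with Add, the request is handed to the I/O thread via the input channel,
+// and the reader is woken with an empty completion packet.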
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
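+// startRead (re)issues the asynchronous ReadDirectoryChanges call for this
+// watch: it cancels any in-flight I/O, recomputes the combined notify mask
+// from the directory mask and all per-name masks, and tears the watch down
+// when no mask remains.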
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
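+ // Treat the full buffer as valid so the parsing loop below still walks
+ // the records we did receive.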
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed.
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle.
+ continue
+ default:
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer.
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer.
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Sanity check: the offset should never run past the bytes we read.
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fvbommel/sortorder/.gitignore b/vendor/github.com/fvbommel/sortorder/.gitignore new file mode 100644 index 00000000000..c021733e255 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/.gitignore @@ -0,0 +1,19 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +# Folders +_obj +_test +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* +_testmain.go +*.exe +*.test +*.prof diff --git a/vendor/github.com/fvbommel/sortorder/LICENSE b/vendor/github.com/fvbommel/sortorder/LICENSE new file mode 100644 index 00000000000..5c695fb590f --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/LICENSE @@ -0,0 +1,17 @@ +The MIT License (MIT) +Copyright (c) 2015 Frits van Bommel +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
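Before the vendored `sortorder` sources below, a minimal usage sketch (illustrative, not part of the vendored files): the package's `Natural` type implements `sort.Interface`, so it plugs directly into the standard library's `sort` package.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/fvbommel/sortorder"
)

func main() {
	names := []string{"img12.png", "img10.png", "img2.png", "img1.png"}
	sort.Sort(sortorder.Natural(names)) // numeric runs compare by value: "img2" < "img12"
	fmt.Println(names)                  // [img1.png img2.png img10.png img12.png]
}
```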
diff --git a/vendor/github.com/fvbommel/sortorder/README.md b/vendor/github.com/fvbommel/sortorder/README.md new file mode 100644 index 00000000000..7ebcab1d16e --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/README.md @@ -0,0 +1,5 @@ +# sortorder [![PkgGoDev](https://pkg.go.dev/badge/github.com/fvbommel/sortorder)](https://pkg.go.dev/github.com/fvbommel/sortorder) + + import "github.com/fvbommel/sortorder" + +Sort orders and comparison functions. diff --git a/vendor/github.com/fvbommel/sortorder/doc.go b/vendor/github.com/fvbommel/sortorder/doc.go new file mode 100644 index 00000000000..75d5a2928f3 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/doc.go @@ -0,0 +1,5 @@ +// Package sortorder implements sort orders and comparison functions. +// +// Currently, it only implements so-called "natural order", where integers +// embedded in strings are compared by value. +package sortorder diff --git a/vendor/github.com/fvbommel/sortorder/go.mod b/vendor/github.com/fvbommel/sortorder/go.mod new file mode 100644 index 00000000000..57c8175e3ef --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/go.mod @@ -0,0 +1,3 @@ +module github.com/fvbommel/sortorder + +go 1.13 diff --git a/vendor/github.com/fvbommel/sortorder/go.sum b/vendor/github.com/fvbommel/sortorder/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/fvbommel/sortorder/natsort.go b/vendor/github.com/fvbommel/sortorder/natsort.go new file mode 100644 index 00000000000..66a52c7125d --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/natsort.go @@ -0,0 +1,76 @@ +package sortorder + +// Natural implements sort.Interface to sort strings in natural order. This +// means that e.g. "abc2" < "abc12". +// +// Non-digit sequences and numbers are compared separately. The former are +// compared bytewise, while the latter are compared numerically (except that +// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") +// +// Limitation: only ASCII digits (0-9) are considered. +type Natural []string + +func (n Natural) Len() int { return len(n) } +func (n Natural) Swap(i, j int) { n[i], n[j] = n[j], n[i] } +func (n Natural) Less(i, j int) bool { return NaturalLess(n[i], n[j]) } + +func isdigit(b byte) bool { return '0' <= b && b <= '9' } + +// NaturalLess compares two strings using natural ordering. This means that e.g. +// "abc2" < "abc12". +// +// Non-digit sequences and numbers are compared separately. The former are +// compared bytewise, while the latter are compared numerically (except that +// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") +// +// Limitation: only ASCII digits (0-9) are considered. +func NaturalLess(str1, str2 string) bool { + idx1, idx2 := 0, 0 + for idx1 < len(str1) && idx2 < len(str2) { + c1, c2 := str1[idx1], str2[idx2] + dig1, dig2 := isdigit(c1), isdigit(c2) + switch { + case dig1 != dig2: // Digits before other characters. + return dig1 // True if LHS is a digit, false if the RHS is one. + case !dig1: // && !dig2, because dig1 == dig2 + // UTF-8 compares bytewise-lexicographically, no need to decode + // codepoints. + if c1 != c2 { + return c1 < c2 + } + idx1++ + idx2++ + default: // Digits + // Eat zeros. + for ; idx1 < len(str1) && str1[idx1] == '0'; idx1++ { + } + for ; idx2 < len(str2) && str2[idx2] == '0'; idx2++ { + } + // Eat all digits. 
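+ // nonZero marks where the significant digits begin; any leading zeros
+ // have already been skipped above.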
+ nonZero1, nonZero2 := idx1, idx2 + for ; idx1 < len(str1) && isdigit(str1[idx1]); idx1++ { + } + for ; idx2 < len(str2) && isdigit(str2[idx2]); idx2++ { + } + // If lengths of numbers with non-zero prefix differ, the shorter + // one is less. + if len1, len2 := idx1-nonZero1, idx2-nonZero2; len1 != len2 { + return len1 < len2 + } + // If they're equal, string comparison is correct. + if nr1, nr2 := str1[nonZero1:idx1], str2[nonZero2:idx2]; nr1 != nr2 { + return nr1 < nr2 + } + // Otherwise, the one with less zeros is less. + // Because everything up to the number is equal, comparing the index + // after the zeros is sufficient. + if nonZero1 != nonZero2 { + return nonZero1 < nonZero2 + } + } + // They're identical so far, so continue comparing. + } + // So far they are identical. At least one is ended. If the other continues, + // it sorts last. + return len(str1) < len(str2) +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 00000000000..e256a31e00a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000000..0e9d6edc010 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000000..7805d36de73 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000000..0200f75b4d1 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. 
+ p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 00000000000..58600740266 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. 
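+// This ordering groups all candidates for the same effective JSON name
+// together, so typeFields can resolve conflicts for each name in one pass.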
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. 
+ // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
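+// For example, for the options parsed from a `json:"name,omitempty,string"`
+// tag, Contains("omitempty") and Contains("string") both report true.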
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 00000000000..4fb4054a8b7
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+	}
+
+	y, err := JSONToYAML(j)
+	if err != nil {
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+	}
+
+	return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+	vo := reflect.ValueOf(o)
+	j, err := yamlToJSON(y, &vo)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = json.Unmarshal(j, o)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshalling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
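+	// (Every map key has been converted to a string by this point, so
+	// json.Marshal cannot fail on non-string keys.)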
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce. Else return normal.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value is between 32-bit
+				// and 64-bit. Otherwise the key type will simply be int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Stolen from go-yaml to use the same conversion to string as
+				// the go-yaml library uses to convert float to string when
+				// Marshaling.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+						jtf := t.Field(f.index[0])
+						strMap[keyString], err = convertToJSONableObject(v, &jtf)
+						if err != nil {
+							return nil, err
+						}
+						continue
+					}
+				} else if t.Kind() == reflect.Map {
+					// Create a zero value of the map's element type to use as
+					// the JSON target.
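+					// A zero Value suffices here: the recursion only reads
+					// type information from it and never stores into it.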
+ jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 00000000000..e9b5520a1c5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,183 @@ +# A more minimal logging API for Go + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what +he has to say, and it largely aligns with my own experiences. Too many +choices of levels means inconsistent logs. + +This package offers a purely abstract interface, based on these ideas but with +a few twists. Code can depend on just this interface and have the actual +logging implementation be injected from callers. Ideally only `main()` knows +what logging implementation is being used. + +# Differences from Dave's ideas + +The main differences are: + +1) Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. 
I disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. I
+restrict the API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2) Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug". Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+This is a BETA grade API.
+
+There are implementations for the following logging libraries:
+
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger):
+  [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+
+# FAQ
+
+## Conceptual
+
+## Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+  key-value pairs, it's much easier to query your structured logs for
+  particular values by filtering on the contents of a particular key --
+  think searching request logs for error codes, Kubernetes reconcilers for
+  the name and namespace of the reconciled object, etc
+
+- **Structured logging makes it easier to have cross-referencable logs**:
+  Similarly to searchability, if you maintain conventions around your
+  keys, it becomes easy to gather all log lines related to a particular
+  concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+  structure to your logs, you've got more precise control over how much
+  information is logged -- you might choose in a particular configuration
+  to log certain keys but not others, only log lines where a certain key
+  matches a certain value, etc, instead of just having v-levels and names
+  to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+  objects). Structured logs allow you to preserve that structure when
+  outputting.
+
+## Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+## Why not more named levels, like Warning?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+## Why not allow format strings, too?
+ +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc + +- They don't store structured data well, since contents are flattened into + a string + +- They're not cross-referencable + +- They don't compress easily, since the message is not constant + +(unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys) + +## Practical + +## Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +## What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` function to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +## But I *really* want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message + +2. For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, place it as a key value, and +call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +## How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack". + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs). 
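+
+For example (a sketch only; assume `log` is whatever `logr.Logger`
+implementation your `main()` wired up, and `body` is some captured payload):
+
+```go
+log.Info("starting controller")                    // level 0: always want this
+log.V(1).Info("reconciling", "object", "ns/name")  // common, possibly too chatty
+log.V(10).Info("raw API response", "body", body)   // debug/trace firehose
+```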
+ +## How do I choose my keys + +- make your keys human-readable +- constant keys are generally a good idea +- be consistent across your codebase +- keys should naturally match parts of the message string + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 00000000000..2bafb13d15c --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a valid Logger that discards all messages logged to it. +// It can be used whenever the caller is not interested in the logs. +func Discard() Logger { + return DiscardLogger{} +} + +// DiscardLogger is a Logger that discards all messages. +type DiscardLogger struct{} + +func (l DiscardLogger) Enabled() bool { + return false +} + +func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { +} + +func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +} + +func (l DiscardLogger) V(level int) Logger { + return l +} + +func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { + return l +} + +func (l DiscardLogger) WithName(name string) Logger { + return l +} + +// Verify that it actually implements the interface +var _ Logger = DiscardLogger{} diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod new file mode 100644 index 00000000000..591884e91f1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/go.mod @@ -0,0 +1,3 @@ +module github.com/go-logr/logr + +go 1.14 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 00000000000..842428bd3a3 --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,266 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging +// +// This is a BETA grade API. Until there is a significant 2nd implementation, +// I don't really know how it will change. + +// Package logr defines abstract interfaces for logging. 
Packages can depend on +// these interfaces and callers can implement logging in whatever way is +// appropriate. +// +// Usage +// +// Logging is done using a Logger. Loggers can have name prefixes and named +// values attached, so that all log messages logged with that Logger have some +// base context associated. +// +// The term "key" is used to refer to the name associated with a particular +// value, to disambiguate it from the general Logger name. +// +// For instance, suppose we're trying to reconcile the state of an object, and +// we want to log that we've made some decision. +// +// With the traditional log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr's structured logging, we'd write: +// // elsewhere in the file, set up the logger to log with the prefix of +// // "reconcilers", and the named value target-type=Foo, for extra context. +// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// +// // later on... +// log.Info("setting foo on object", "value", targetValue, "object", object) +// +// Depending on our logging implementation, we could then make logging decisions +// based on field values (like only logging such events for objects in a certain +// namespace), or copy the structured information into a structured log store. +// +// For logging errors, Logger has a method called Error. Suppose we wanted to +// log an error while reconciling. With the traditional log package, we might +// write: +// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) +// +// With logr, we'd instead write: +// // assuming the above setup for log +// log.Error(err, "unable to reconcile object", "object", object) +// +// This functions similarly to: +// log.Info("unable to reconcile object", "error", err, "object", object) +// +// However, it ensures that a standard key for the error value ("error") is used +// across all error logging. Furthermore, certain implementations may choose to +// attach additional information (such as stack traces) on calls to Error, so +// it's preferred to use Error to log errors. +// +// Parts of a log line +// +// Each log message from a Logger has four types of context: +// logger name, log verbosity, log message, and the named values. +// +// The Logger name consists of a series of name "segments" added by successive +// calls to WithName. These name segments will be joined in some way by the +// underlying implementation. It is strongly recommended that name segments +// contain simple identifiers (letters, digits, and hyphen), and do not contain +// characters that could muddle the log output or confuse the joining operation +// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). +// +// Log verbosity represents how little a log matters. Level zero, the default, +// matters most. Increasing levels matter less and less. Try to avoid lots of +// different verbosity levels, and instead provide useful keys, logger names, +// and log messages for users to filter on. It's illegal to pass a log level +// below zero. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. +// +// Variable information can then be attached using named values (key/value +// pairs). Keys are arbitrary strings, while values may be any Go value. 
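+//
+// For example (a hypothetical sketch):
+//   log.Info("scaled deployment", "deployment", name, "replicas", 3)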
+//
+// Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// * be human-readable and meaningful (not auto-generated or simple ordinals)
+// * be constant (not dependent on input data)
+// * contain only printable characters
+// * not contain whitespace or punctuation
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+//
+// * `"caller"`: the calling information (file/line) of a particular log line.
+// * `"error"`: the underlying error value in the `Error` method.
+// * `"level"`: the log level.
+// * `"logger"`: the name of the associated logger.
+// * `"msg"`: the log message.
+// * `"stacktrace"`: the stack trace associated with a particular log line or
+//   error (often from the `Error` message).
+// * `"ts"`: the timestamp for a log line.
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//   // Underlier exposes access to the underlying logging implementation.
+//   // Since callers only have a logr.Logger, they have to know which
+//   // implementation is in use, so this interface is less of an abstraction
+//   // and more of a way to test type conversion.
+//   type Underlier interface {
+//       GetUnderlying()
+//   }
+package logr
+
+import (
+	"context"
+)
+
+// TODO: consider adding back in format strings if they're really needed
+// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
+// TODO: consider other bits of glog functionality like Flush, OutputStats
+
+// Logger represents the ability to log messages, both errors and not.
+type Logger interface {
+	// Enabled tests whether this Logger is enabled. For example, commandline
+	// flags might be used to set the logging verbosity and disable some info
+	// logs.
+	Enabled() bool
+
+	// Info logs a non-error message with the given key/value pairs as context.
+	//
+	// The msg argument should be used to add some constant description to
+	// the log line. The key/value pairs can then be used to add additional
+	// variable information. The key/value pairs should alternate string
+	// keys and arbitrary values.
+	Info(msg string, keysAndValues ...interface{})
+
+	// Error logs an error, with the given message and key/value pairs as context.
+	// It functions similarly to calling Info with the "error" named value, but may
+	// have unique behavior, and should be preferred for logging errors (see the
+	// package documentation for more information).
+	//
+	// The msg field should be used to add context to any underlying error,
+	// while the err field should be used to attach the actual error that
+	// triggered this log line, if present.
+	Error(err error, msg string, keysAndValues ...interface{})
+
+	// V returns a Logger value for a specific verbosity level, relative to
+	// this Logger. In other words, V values are additive. A higher verbosity
+	// level means a log message is less important. It's illegal to pass a log
+	// level less than zero.
+	V(level int) Logger
+
+	// WithValues adds some key-value pairs of context to a logger.
+	// See Info for documentation on how key/value pairs work.
+	WithValues(keysAndValues ...interface{}) Logger
+
+	// WithName adds a new element to the logger's name.
+	// Successive calls to WithName continue to append
+	// suffixes to the logger's name. It's strongly recommended
+	// that name segments contain only letters, digits, and hyphens
+	// (see the package documentation for more information).
+	WithName(name string) Logger
+}
+
+// InfoLogger provides compatibility with code that relies on the v0.1.0
+// interface.
+//
+// Deprecated: InfoLogger is an artifact of early versions of this API. New
+// users should never use it and existing users should use Logger instead. This
+// will be removed in a future release.
+type InfoLogger = Logger
+
+type contextKey struct{}
+
+// FromContext returns a Logger constructed from ctx or nil if no
+// logger details are found.
+func FromContext(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return nil
+}
+
+// FromContextOrDiscard returns a Logger constructed from ctx or a Logger
+// that discards all messages if no logger details are found.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return Discard()
+}
+
+// NewContext returns a new context derived from ctx that embeds the Logger.
+func NewContext(ctx context.Context, l Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, l)
+}
+
+// CallDepthLogger represents a Logger that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames. This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogger interface {
+	Logger
+
+	// WithCallDepth returns a Logger that will offset the call stack by the
+	// specified number of frames when logging call site information. If depth
+	// is 0 the attribution should be to the direct caller of this method. If
+	// depth is 1 the attribution should skip 1 call frame, and so on.
+	// Successive calls to this are additive.
+	WithCallDepth(depth int) Logger
+}
+
+// WithCallDepth returns a Logger that will offset the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports the CallDepthLogger interface,
+// the WithCallDepth method will be called and the result returned. If the
+// implementation does not support CallDepthLogger, the original Logger will be
+// returned.
+// +// Callers which care about whether this was supported or not should test for +// CallDepthLogger support themselves. +func WithCallDepth(logger Logger, depth int) Logger { + if decorator, ok := logger.(CallDepthLogger); ok { + return decorator.WithCallDepth(depth) + } + return logger +} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 00000000000..3d97fc7a29f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 00000000000..1b4f6c208a1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 00000000000..f57de90da8a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 00000000000..00d65f32773 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 00000000000..a26b046d94f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a no-op
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+		emOut := out.Addr().Interface().(extensionsBytes)
+		bIn := emIn.GetExtensions()
+		bOut := emOut.GetExtensions()
+		*bOut = append(*bOut, *bIn...)
+	} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
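+// When prop is nil, the special case below for zero-length proto3 []byte
+// fields is not applied.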
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 00000000000..24552483c6c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 00000000000..63b0f08bef2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
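+//
+// For example (editor's note), the two bytes 0xAC 0x02 decode to 300:
+// each byte contributes its low seven bits, least significant group
+// first, and a clear high bit marks the final byte.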
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
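+//
+// For example (editor's note), the bytes 0x03 0x66 0x6F 0x6F decode to
+// "foo": a varint length of 3 followed by that many payload bytes.
+// When alloc is false the returned slice aliases the Buffer's
+// underlying storage, so callers must copy it if they keep it.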
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
new file mode 100644
index 00000000000..35b882c09aa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 00000000000..fe1bd7d904e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
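+
+// Editor's sketch (not upstream documentation): typical use is to drop
+// unknown fields that were preserved during unmarshaling, e.g.
+//
+//	m := &pb.T{} // hypothetical generated message type
+//	if err := proto.Unmarshal(data, m); err == nil {
+//		proto.DiscardUnknown(m) // re-marshaling m now omits unknown fields
+//	}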
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
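+	// (Editor's note: e.g. a []*pb.T field unwraps to the struct type
+	// pb.T, so the switch below dispatches on the element kind.)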
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000000..93464c91cff --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Range of a Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// durationFromProto converts a Duration to a time.Duration. durationFromProto
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// durationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 00000000000..e748e1730e1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 00000000000..9581ccd3042 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,205 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
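+// Zigzag encoding interleaves signed values so that numbers of small
+// magnitude map to small varints (editor's note): 0, -1, 1, -2, 2
+// encode to 0, 1, 2, 3, 4 respectively.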
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 00000000000..0f5fb173e9f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 00000000000..d4db5a1c145 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
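+
+Editor's note: a common use is in tests, e.g.
+
+	if !proto.Equal(got, want) {
+		t.Errorf("got %v, want %v", got, want)
+	}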
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 00000000000..341c6f57f52 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
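+	// (Editor's note: callers such as mergeStruct check err == nil and
+	// silently skip extension handling when this sentinel is returned.)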
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
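+//
+// A minimal usage sketch (E_MyExt is a hypothetical generated descriptor
+// whose ExtensionType is *int32; real descriptors come from generated code):
+//
+//	v, err := proto.GetExtension(msg, E_MyExt)
+//	if err == nil {
+//		n := *v.(*int32) // the concrete type mirrors ExtensionType
+//		_ = n
+//	}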
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
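+	// (Worked example of the layout parsed below: for field number 5 with
+	// wire type 2, the leading tag varint is 5<<3|2 = 42 = 0x2a, followed
+	// by a length varint and that many payload bytes.)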
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. 
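+// For example, a generated file typically registers its descriptors with
+// (E_MyExtension being an illustrative generated name):
+//
+//	func init() { proto.RegisterExtension(E_MyExtension) }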
+ +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 00000000000..6f1ae120ece --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
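+
+// Illustrative sketch of the byte-slice extension support defined in this
+// file: for a message m whose generated type stores extensions in a raw
+// []byte field (the extensionsBytes interface), the raw wire data can be
+// split into per-field entries with
+//
+//	exts, err := BytesToExtensionsMap(*m.GetExtensions())
+//	_, present := exts[5] // encoded data for extension field 5, if any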
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	end := len(data)
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[end-len(e.enc):], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		end -= n
+		o += n
+	}
+	return o, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+	e := m[id]
+	if err := e.Encode(); err != nil {
+		return nil, err
+	}
+	return e.enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+	switch wire {
+	case WireVarint:
+		_, n := DecodeVarint(buf)
+		return n, nil
+	case WireFixed64:
+		return 8, nil
+	case WireBytes:
+		v, n := DecodeVarint(buf)
+		return int(v) + n, nil
+	case WireFixed32:
+		return 4, nil
+	case WireStartGroup:
+		offset := 0
+		for {
+			u, n := DecodeVarint(buf[offset:])
+			fwire := int(u & 0x7)
+			offset += n
+			if fwire == WireEndGroup {
+				return offset, nil
+			}
+			// Recurse with the wire type of the field just read (fwire),
+			// not the enclosing group's wire type.
+			s, err := size(buf[offset:], fwire)
+			if err != nil {
+				return 0, err
+			}
+			offset += s
+		}
+	}
+	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+	m := make(map[int32]Extension)
+	i := 0
+	for i < len(buf) {
+		tag, n := DecodeVarint(buf[i:])
+		if n <= 0 {
+			return nil, fmt.Errorf("unable to decode varint")
+		}
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		l, err := size(buf[i+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		end := i + int(l) + n
+		m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+		i = end
+	}
+	return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+	ee := Extension{enc: make([]byte, len(e))}
+	copy(ee.enc, e)
+	return ee
+}
+
+func AppendExtension(e Message, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsBytes); eok {
+		ext := ee.GetExtensions()
+		*ext = append(*ext, buf...)
+		return
+	}
+	if ee, eok := e.(extendableProto); eok {
+		m := ee.extensionsWrite()
+		ext := m[int32(tag)] // may be missing
+		ext.enc = append(ext.enc, buf...)
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 00000000000..80db1c155b5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,973 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. 
It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults 
sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. 
+type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. 
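+	// (Worked example: for a proto2 field `optional int32 type = 2 [default=77]`,
+	// ft is *int32, so the switch below produces
+	// scalarField{kind: reflect.Int32, value: int32(77)}.)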
+ sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+const (
+	// GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion3 = true
+
+	// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion2 = true
+
+	// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 00000000000..b3aa39190a1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,50 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
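+
+// Illustrative sketch of the JSON enum helper defined in this file:
+//
+//	b, _ := MarshalJSONEnum(map[int32]string{17: "X"}, 17) // []byte(`"X"`)
+//	b, _ = MarshalJSONEnum(map[int32]string{17: "X"}, 99)  // []byte(`"99"`)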
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 00000000000..f48a756761e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
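+//
+// A type stored in a message set is expected to provide this method via
+// generated code, e.g. (hypothetical):
+//
+//	func (*MyMsg) MessageTypeId() int32 { return 12345 }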
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000000..b6cad90834b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
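// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// offset above is the reflect analogue of pointer arithmetic: a field is an
// index path, and the field's address is re-derived from the struct value on
// every access. In miniature:
//
//	type S struct {
//		A int64
//		B int32
//	}
//	var s S
//	p := pointer{v: reflect.ValueOf(&s)}
//	pb := p.offset(field([]int{1})) // reflect.Value holding &s.B
//	*pb.toInt32() = 7               // writes s.B through the generic pointer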
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000000..7ffd3c29d90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
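// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// The mutex-guarded load/store helpers in pointer_reflect.go above stand in
// for what the unsafe build (pointer_unsafe.go) does lock-free, e.g.:
//
//	func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
//		return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
//	}
//
// sync/atomic's pointer operations only work through unsafe.Pointer, which
// the purego/appengine/js builds rule out, so a single package-wide
// sync.Mutex serializes every load and store instead.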
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000000..d55a335d945 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
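// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// The "super-tricky" reads in toPointer and toAddrPointer above rely on the
// gc runtime's interface layout: a non-empty interface value is two machine
// words, (type, data). Casting the interface's address to *[2]unsafe.Pointer
// and taking word [1] extracts the data pointer without a round trip through
// reflect - valid only under the !purego,!appengine,!js build tags this file
// carries.
//
//	func dataWord(i *interface{}) unsafe.Pointer {
//		return (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]
//	}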
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 00000000000..aca8eed02a1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
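// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// asPointerTo in pointer_unsafe.go above is the bridge back from raw memory
// into reflect: reflect.NewAt manufactures a typed *T Value over an existing
// address, so the table-driven code only pays for reflection when it must.
//
//	var x int64 = 7
//	v := reflect.NewAt(reflect.TypeOf(int64(0)), unsafe.Pointer(&x))
//	fmt.Println(v.Elem().Int()) // 7, read through a freshly typed *int64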
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 00000000000..28da1475fb3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
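// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// The tagMap above in action: tags below tagMapFastLimit index straight into
// a slice (-1 marking an empty slot), and larger tags spill into the map.
//
//	var tm tagMap
//	tm.put(3, 0)        // lands in fastTags[3]
//	tm.put(100000, 1)   // lands in slowTags
//	fi, ok := tm.get(3) // 0, true
//	fi, ok = tm.get(5)  // 0, false: fastTags[5] is -1 or out of range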
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
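// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// A concrete run of Parse above on the tag grammar it documents:
//
//	var p Properties
//	p.Parse("bytes,49,opt,name=foo,def=hello!")
//	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
//	// p.Optional == true, p.OrigName == "foo",
//	// p.HasDefault == true, p.Default == "hello!"
//
// Because commas inside a default value are not escaped, everything after
// "def=" - later commas included - is folded back into Default, which is why
// def must always come last in the tag.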
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
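// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// Generated code populates the registries declared just below from its init
// functions, much as it calls RegisterEnum above; the message name here is
// hypothetical:
//
//	func init() {
//		RegisterType((*Foo)(nil), "example.Foo")
//	}
//
//	MessageName((*Foo)(nil))   // "example.Foo"
//	MessageType("example.Foo") // reflect.Type of *Foo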
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 00000000000..40ea3dd935c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 00000000000..5a5fd93f7c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 00000000000..f8babdefab9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3009 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
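// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// Skip in skip_gogo.go above consumes exactly one key/value pair and reports
// how many bytes it spanned:
//
//	buf := []byte{0x08, 0xac, 0x02, 0x10, 0x01} // field 1: varint 300, field 2: varint 1
//	n, err := Skip(buf)                         // n == 3, err == nil
//	rest := buf[n:]                             // {0x10, 0x01}: field 2 remains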
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
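// [Editorial sketch; not part of the vendored gogo/protobuf sources.]
// The Size entry point above and the Marshal entry point just below are
// driven by generated code, which keeps one cached InternalMessageInfo per
// message type (Foo is hypothetical):
//
//	var xxx_messageInfo_Foo InternalMessageInfo
//
//	func (m *Foo) XXX_Size() int {
//		return xxx_messageInfo_Foo.Size(m)
//	}
//
//	func (m *Foo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
//		return xxx_messageInfo_Foo.Marshal(b, m, deterministic)
//	}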
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// fall back to compute the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		b = append(b, s...)
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.bytesExtensions = invalidField
+	u.sizecache = invalidField
+	isOneofMessage := false
+
+	if reflect.PtrTo(t).Implements(sizerType) {
+		u.hassizer = true
+	}
+	if reflect.PtrTo(t).Implements(protosizerType) {
+		u.hasprotosizer = true
+	}
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
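+	// If the type implements Marshaler, none of the per-field tables below
+	// are built; size and marshal above defer entirely to the custom
+	// implementation.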
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
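+	// For example, "bytes,49,opt,name=foo,def=hello!" splits into
+	// tags[0] == "bytes" (wire encoding), tags[1] == "49" (field number),
+	// tags[2] == "opt" (cardinality), followed by options such as name/def.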
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
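+//
+// Illustrative example (the field declaration is hypothetical): for a proto2
+// field declared as
+//
+//	F *uint32 `protobuf:"fixed32,3,opt,name=f"`
+//
+// the pointer kind is stripped, encoding "fixed32" on kind uint32 matches the
+// fixed32 case below, and the pair (sizeFixed32Ptr, appendFixed32Ptr) is
+// returned: 4 bytes plus the tag size per present value.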
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
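+			// For instance, a oneof wrapper whose single field is
+			// a []byte with a proto3 tag must still emit an empty
+			// bytes entry so the decoder can tell which case was set.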
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
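+
+// Worked example for the packed case below: a packed repeated bool holding
+// three elements with a one-byte tag sizes to 3 (payload bytes, one per bool)
+// + 1 (length varint) + 1 (tag) = 5 bytes.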
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
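+	// Example: 300 (0b1_0010_1100) is emitted low 7 bits first, with the
+	// continuation bit set on every byte but the last: 0xAC, 0x02.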
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
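+	// proto3 scalar semantics: the zero value contributes nothing to the
+	// wire (no tag, no payload), mirroring sizeVarintS32ValueNoZero.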
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
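+//
+// Each element is length-delimited on the wire: a tag (wire type 2), then a
+// varint byte length, then the element's own encoding, repeated per element.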
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
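+			// The size pass that precedes marshaling has already filled
+			// XXX_sizecache, so reading it here avoids re-walking nested
+			// maps (which is what could otherwise become quadratic).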
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
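// Hypothetical sketch of the map-entry layout produced above: each entry
// is encoded like a two-field message, key under tag 1 and value under
// tag 2 (hence the 1<<3 and 2<<3 wire tags), and the whole entry is then
// emitted as one length-delimited occurrence of the map field itself.
// Deterministic output additionally sorts the keys, as in the marshaler
// above. appendVarintSketch is an illustrative stand-in for appendVarint.
func appendVarintSketch(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80) // low 7 bits with continuation bit set
		v >>= 7
	}
	return append(b, byte(v))
}

func encodeMapEntrySketch(key string, val uint64) []byte {
	var e []byte
	e = appendVarintSketch(e, 1<<3|2) // map key: tag 1, wire type bytes
	e = appendVarintSketch(e, uint64(len(key)))
	e = append(e, key...)
	e = appendVarintSketch(e, 2<<3|0) // map value: tag 2, wire type varint
	return appendVarintSketch(e, val)
}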
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
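// Hypothetical sketch of one MessageSet item as framed by
// sizeMessageSet/appendMessageSet: a group under field 1 holding a varint
// type_id under field 2 and the length-delimited message under field 3.
// Illustrative only; reuses the appendVarintSketch helper sketched above.
func messageSetItemSketch(typeID uint64, msg []byte) []byte {
	var b []byte
	b = append(b, 1<<3|3) // field 1, start group (WireStartGroup)
	b = append(b, 2<<3|0) // field 2, varint (WireVarint): the type_id
	b = appendVarintSketch(b, typeID)
	b = append(b, 3<<3|2) // field 3, length-delimited (WireBytes): the message
	b = appendVarintSketch(b, uint64(len(msg)))
	b = append(b, msg...)
	b = append(b, 1<<3|4) // field 1, end group (WireEndGroup)
	return b
}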
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
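// Hypothetical usage sketch for the entry points defined below: Size and
// Marshal are the one-shot paths, while Buffer.Marshal appends into a
// reusable buffer and honors SetDeterministic. pb stands for any
// generated message; the function itself is illustrative, not package API.
func marshalTwoWaysSketch(pb Message) ([]byte, error) {
	one, err := Marshal(pb) // one-shot: sizes first, then encodes
	if err != nil {
		return nil, err
	}
	_ = one
	var buf Buffer
	buf.SetDeterministic(true) // sorted map keys, stable output
	if err := buf.Marshal(pb); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}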
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 00000000000..997f57c1e10 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler. +// It marshals a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
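// Hypothetical sketch of the conversion behind timestampProto and
// durationProto used above: a stdtime field travels as a Timestamp
// (seconds + nanos since the Unix epoch) and a stdduration field as a
// Duration (seconds + leftover nanos). The struct below is an
// illustrative stand-in for the generated well-known types; assumes the
// "time" import already present in this file.
type secondsNanosSketch struct {
	Seconds int64
	Nanos   int32
}

func timestampSketch(t time.Time) secondsNanosSketch {
	return secondsNanosSketch{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())}
}

func durationSketch(d time.Duration) secondsNanosSketch {
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	return secondsNanosSketch{Seconds: secs, Nanos: int32(nanos - secs*1e9)}
}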
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 00000000000..60dcf70d1e6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,676 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
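// Hypothetical restatement of the merge semantics implemented above on a
// plain struct: scalar fields only overwrite when the source value is
// non-zero (the same condition the basicWidth fast path tests), and
// repeated fields concatenate. The type and function are illustrative,
// not part of this package.
type counterSketch struct {
	Count int32
	Tags  []string
}

func mergeCounterSketch(dst, src *counterSketch) {
	if src.Count != 0 { // zero source scalars leave dst untouched
		dst.Count = src.Count
	}
	dst.Tags = append(dst.Tags, src.Tags...) // repeated fields append
}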
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
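// Hypothetical restatement of the pointer-scalar cases above (e.g. *int64):
// a nil source pointer means "field not set" and is skipped; otherwise the
// destination is either initialized with a fresh copy or overwritten in
// place, never aliased to the source. Illustrative code, not package API.
func mergeInt64PtrSketch(dst **int64, src *int64) {
	if src == nil {
		return // unset in source: nothing to merge
	}
	if *dst == nil {
		v := *src
		*dst = &v // allocate a copy, as Int64(**sfpp) does above
	} else {
		**dst = *src
	}
}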
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? 
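// Hypothetical sketch of why the byte-slice merges above copy with
// append([]byte{}, sb...) rather than assigning: the merged message must
// not alias the source's backing arrays, while still preserving the
// distinction between a nil element and an empty one.
func deepCopyBytesSketch(src [][]byte) [][]byte {
	dst := make([][]byte, 0, len(src))
	for _, sb := range src {
		if sb == nil {
			dst = append(dst, nil) // keep nil as nil
			continue
		}
		dst = append(dst, append([]byte{}, sb...)) // fresh backing array
	}
	return dst
}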
+ dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000000..937229386a2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2249 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
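// Hypothetical sketch of the caching pattern used here: an atomic load
// serves the fast path, and the slow path computes the table-driven info
// once and publishes it for later calls. atomic.Value stands in for the
// package's typed atomicLoad/StoreUnmarshalInfo helpers; assumes the
// "sync/atomic" import already present in this file.
type infoCacheSketch struct{ v atomic.Value }

func (c *infoCacheSketch) get(build func() *int) *int {
	if p, ok := c.v.Load().(*int); ok {
		return p // fast path: already computed
	}
	p := build() // slow path: compute once
	c.v.Store(p) // publish for subsequent loads
	return p
}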
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all set + unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away) + extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist + oldExtensions field // offset of old-form extensions field (of type map[int]Extension) + extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid + isMessageSet bool // if true, implies extensions field is in message set format + bytesExtensions field // offset of XXX_extensions with type []byte +} + +// An unmarshaler takes a stream of bytes. +// It unmarshals some of the bytes into the given value. +// returns the remaining bytes and an error if any. +type unmarshaler func(b []byte, f pointer, w int) ([]byte, error) + +type unmarshalFieldInfo struct { + // location of the field in the proto message structure. + field field + + // function to unmarshal the data for the field. + unmarshal unmarshaler + + // if a required field, contains a single set bit at this field's index in the required field list. + reqMask uint64 + + name string // name of the field, for error reporting +} + +var ( + unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{} + unmarshalInfoLock sync.Mutex +) + +// getUnmarshalInfo returns the data structure which can be +// subsequently used to unmarshal a message of the given type. +// t is the type of the message (note: not pointer to message). +func getUnmarshalInfo(t reflect.Type) *unmarshalInfo { + // It would be correct to return a new unmarshalInfo + // unconditionally. We would end up allocating one + // per occurrence of that type as a message or submessage. + // We use a cache here just to reduce memory usage. + unmarshalInfoLock.Lock() + defer unmarshalInfoLock.Unlock() + u := unmarshalInfoMap[t] + if u == nil { + u = &unmarshalInfo{typ: t} + unmarshalInfoMap[t] = u + } + return u +} + +// unmarshal does the main work of unmarshaling a message. +// u provides type information used to unmarshal the message. +// m is a pointer to a protocol buffer message. +// b is a byte stream to unmarshal into m. +// This is top routine used when recursively unmarshaling submessages. +func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeUnmarshalInfo() + } + if u.isMessageSet { + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + } + var reqMask uint64 // bitmask of required fields we've seen. + var errLater error + for len(b) > 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ.
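// Hypothetical sketches of the low-level decode steps used by the loop
// above and the field unmarshalers below: LEB128 varint decoding (the
// general case behind the 1- and 2-byte fast paths), splitting a key
// varint into tag and wire type, and the zigzag transform for sint64.
// Illustrative only; decodeVarint is the package's real counterpart.
func decodeVarintSketch(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		if c < 0x80 {
			return x | uint64(c)<<(7*uint(i)), i + 1
		}
		x |= uint64(c&0x7f) << (7 * uint(i))
	}
	return 0, 0 // truncated input
}

func splitKeySketch(x uint64) (tag uint64, wire int) {
	return x >> 3, int(x & 7) // same shift/mask as the dispatch loop above
}

func zigzag64Sketch(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63 // identical to the sint64 unmarshalers
}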
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. 
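// Hypothetical restatement of the required-field bookkeeping above: each
// required field owns one bit of reqMask, the parse loop ORs in bits as
// fields arrive, and any bit still clear afterwards names a missing
// field. Illustrative only.
func missingRequiredSketch(reqFields []string, seen uint64) []string {
	var missing []string
	for i, name := range reqFields {
		if seen&(uint64(1)<<uint(i)) == 0 {
			missing = append(missing, name) // reported via RequiredNotSetError
		}
	}
	return missing
}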
+ // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if len(oneofFields) > 0 { + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + + } + } + + // Get extension ranges, if any. + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 + + atomic.StoreInt32(&u.initialized, 1) +} + +// setTag stores the unmarshal information for the given tag. +// tag = tag # for field +// field/unmarshal = unmarshal info for that field. +// reqMask = if required, bitmask for field position in required field list. 0 otherwise. +// name = short name of the field. +func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { + i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} + n := u.typ.NumField() + if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
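// Hypothetical sketch of why tag 0 is explicitly disallowed above: a key
// byte is fieldNum<<3|wireType, so a zero byte decodes as field 0 with
// varint wire type, and an all-zero buffer would otherwise appear to
// parse successfully. Illustrative helper, valid for small field numbers.
func keyByteSketch(fieldNum, wireType byte) byte {
	return fieldNum<<3 | wireType // keyByteSketch(0, 0) == 0x00
}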
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + 
case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := 
int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. 
+ v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + 
// that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
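`skipField` in the next hunk is the hand-rolled equivalent of what the newer `google.golang.org/protobuf/encoding/protowire` package exposes as `ConsumeFieldValue`. A hedged sketch of the same wire-type dispatch using that package, for illustration only; the vendored code here predates and does not depend on it:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field 5 as varint 300, then append trailing bytes.
	b := protowire.AppendTag(nil, 5, protowire.VarintType)
	b = protowire.AppendVarint(b, 300)
	b = append(b, 0xde, 0xad)

	num, typ, n := protowire.ConsumeTag(b)
	rest := b[n:]
	skip := protowire.ConsumeFieldValue(num, typ, rest) // skips the 2-byte varint
	fmt.Printf("remaining after skip: % x\n", rest[skip:]) // de ad
}
```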
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
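`decodeVarint` in the next hunk is a manually unrolled base-128 varint reader. For well-formed input it agrees with the standard library's unsigned varint routines, and the `sintNN` unmarshalers above layer the zigzag transform on top. A small sketch, assuming Go 1.19+ for `binary.AppendUvarint`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 encodes as [0xac 0x02]: 7 payload bits per byte, high bit = continuation.
	buf := binary.AppendUvarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02
	v, n := binary.Uvarint(buf)
	fmt.Println(v, n) // 300 2

	// Zigzag decode, the exact expression used by unmarshalSint64Value:
	// 5 is the zigzag encoding of -3.
	x := uint64(5)
	fmt.Println(int64(x>>1) ^ int64(x)<<63>>63) // -3
}
```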
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 00000000000..00d6c7ad937 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. 
Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 {
+ return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) +
slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 00000000000..87416afe955 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,930 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
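The `textWriter` declared in the next hunk is the workhorse of the text marshaler: it tracks a nesting level and lazily writes indentation at the start of each fresh line. A stripped-down standalone illustration of that idea (simplified; the real type also supports a compact one-line mode):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// indentWriter writes two spaces per nesting level whenever it is at the
// start of a line, mirroring textWriter's ind/complete bookkeeping.
type indentWriter struct {
	buf      bytes.Buffer
	ind      int
	complete bool // true when positioned at the start of a line
}

func (w *indentWriter) WriteString(s string) {
	for i := 0; i < len(s); i++ {
		if w.complete {
			w.buf.WriteString(strings.Repeat("  ", w.ind))
		}
		w.buf.WriteByte(s[i])
		w.complete = s[i] == '\n'
	}
}

func main() {
	w := &indentWriter{complete: true}
	w.WriteString("outer: <\n")
	w.ind++
	w.WriteString("inner: 1\n")
	w.ind--
	w.WriteString(">\n")
	fmt.Print(w.buf.String())
	// Output:
	// outer: <
	//   inner: 1
	// >
}
```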
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
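`writeProto3Any`, defined in the next hunk, expands a `google.protobuf.Any` by resolving the registered message type from the last path segment of the type URL, falling back to the raw bytes when the type isn't linked in. A small sketch of just that resolution step, with an illustrative URL:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The Any type URL's final path segment names the message type,
	// which the marshaler then looks up in the type registry.
	turl := "type.googleapis.com/google.protobuf.Duration"
	parts := strings.Split(turl, "/")
	fmt.Println(parts[len(parts)-1]) // google.protobuf.Duration
}
```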
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. 
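`writeAny`, which follows, special-cases non-finite floats so the text form stays interoperable across protobuf implementations (`inf`, `-inf`, `nan`) instead of Go's default `+Inf`/`NaN` rendering. A hedged standalone version of that branch:

```go
package main

import (
	"fmt"
	"math"
)

// textFloat reproduces writeAny's float handling: non-finite values map to
// fixed tokens, everything else falls through to default formatting.
func textFloat(x float64) string {
	switch {
	case math.IsInf(x, 1):
		return "inf"
	case math.IsInf(x, -1):
		return "-inf"
	case math.IsNaN(x):
		return "nan"
	}
	return fmt.Sprint(x)
}

func main() {
	fmt.Println(textFloat(math.Inf(-1)), textFloat(math.NaN()), textFloat(2.5))
	// Output: -inf nan 2.5
}
```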
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+    v = reflect.Indirect(v)
+
+    if props != nil {
+        if len(props.CustomType) > 0 {
+            custom, ok := v.Interface().(Marshaler)
+            if ok {
+                data, err := custom.Marshal()
+                if err != nil {
+                    return err
+                }
+                if err := writeString(w, string(data)); err != nil {
+                    return err
+                }
+                return nil
+            }
+        } else if len(props.CastType) > 0 {
+            if _, ok := v.Interface().(interface {
+                String() string
+            }); ok {
+                switch v.Kind() {
+                case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+                    reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+                    _, err := fmt.Fprintf(w, "%d", v.Interface())
+                    return err
+                }
+            }
+        } else if props.StdTime {
+            t, ok := v.Interface().(time.Time)
+            if !ok {
+                return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+            }
+            tproto, err := timestampProto(t)
+            if err != nil {
+                return err
+            }
+            propsCopy := *props // Make a copy so that this is goroutine-safe
+            propsCopy.StdTime = false
+            err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+            return err
+        } else if props.StdDuration {
+            d, ok := v.Interface().(time.Duration)
+            if !ok {
+                return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface())
+            }
+            dproto := durationProto(d)
+            propsCopy := *props // Make a copy so that this is goroutine-safe
+            propsCopy.StdDuration = false
+            err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+            return err
+        }
+    }
+
+    // Floats have special cases.
+    if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+        x := v.Float()
+        var b []byte
+        switch {
+        case math.IsInf(x, 1):
+            b = posInf
+        case math.IsInf(x, -1):
+            b = negInf
+        case math.IsNaN(x):
+            b = nan
+        }
+        if b != nil {
+            _, err := w.Write(b)
+            return err
+        }
+        // Other values are handled below.
+    }
+
+    // We don't attempt to serialise every possible value type; only those
+    // that can occur in protocol buffers.
+    switch v.Kind() {
+    case reflect.Slice:
+        // Should only be a []byte; repeated fields are handled in writeStruct.
+        if err := writeString(w, string(v.Bytes())); err != nil {
+            return err
+        }
+    case reflect.String:
+        if err := writeString(w, v.String()); err != nil {
+            return err
+        }
+    case reflect.Struct:
+        // Required/optional group/message.
+        var bra, ket byte = '<', '>'
+        if props != nil && props.Wire == "group" {
+            bra, ket = '{', '}'
+        }
+        if err := w.WriteByte(bra); err != nil {
+            return err
+        }
+        if !w.compact {
+            if err := w.WriteByte('\n'); err != nil {
+                return err
+            }
+        }
+        w.indent()
+        if v.CanAddr() {
+            // Calling v.Interface on a struct causes the reflect package to
+            // copy the entire struct. This is racy with the new Marshaler
+            // since we atomically update the XXX_sizecache.
+            //
+            // Thus, we retrieve a pointer to the struct if possible to avoid
+            // a race since v.Interface on the pointer doesn't copy the struct.
+            //
+            // If v is not addressable, then we are not worried about a race
+            // since it implies that the binary Marshaler cannot possibly be
+            // mutating this value.
+ v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
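+    // e.g. marshaling a *SomeMessage (hypothetical type) emits its fields as
+    // top-level `name: value` pairs rather than wrapping them in < ... >.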
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 00000000000..1d6c6aa0e41 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+    "fmt"
+    "reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+    m, ok := enumStringMaps[props.Enum]
+    if !ok {
+        if err := tm.writeAny(w, v, props); err != nil {
+            return err
+        }
+        return nil
+    }
+    key := int32(0)
+    if v.Kind() == reflect.Ptr {
+        key = int32(v.Elem().Int())
+    } else {
+        key = int32(v.Int())
+    }
+    s, ok := m[key]
+    if !ok {
+        if err := tm.writeAny(w, v, props); err != nil {
+            return err
+        }
+        return nil
+    }
+    _, err := fmt.Fprint(w, s)
+    return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 00000000000..f85c0cc81a7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1018 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
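+        // For example, the input `name: "ab" "cd"` is read as a single string
+        // token equal to `name: "abcd"`, mirroring C-style implicit
+        // concatenation of adjacent string literals.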
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
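+                // A hypothetical illustration: `[type.googleapis.com/my.pkg.Msg] < ... >`
+                // contains a slash, so "my.pkg.Msg" is resolved as a registered
+                // message type; a plain `[my.pkg.ext_field]` (no slash) takes the
+                // extension path further below instead.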
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
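+// For example, `foo: 1; bar: 2`, `foo: 1, bar: 2` and `foo: 1 bar: 2` all
+// parse identically (field names here are purely illustrative).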
+func (p *textParser) consumeOptionalSeparator() error {
+    tok := p.next()
+    if tok.err != nil {
+        return tok.err
+    }
+    if tok.value != ";" && tok.value != "," {
+        p.back()
+    }
+    return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+    tok := p.next()
+    if tok.err != nil {
+        return tok.err
+    }
+    if tok.value == "" {
+        return p.errorf("unexpected EOF")
+    }
+    if len(props.CustomType) > 0 {
+        if props.Repeated {
+            t := reflect.TypeOf(v.Interface())
+            if t.Kind() == reflect.Slice {
+                tc := reflect.TypeOf(new(Marshaler))
+                ok := t.Elem().Implements(tc.Elem())
+                if ok {
+                    fv := v
+                    flen := fv.Len()
+                    if flen == fv.Cap() {
+                        nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+                        reflect.Copy(nav, fv)
+                        fv.Set(nav)
+                    }
+                    fv.SetLen(flen + 1)
+
+                    // Read one.
+                    p.back()
+                    return p.readAny(fv.Index(flen), props)
+                }
+            }
+        }
+        if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+            custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+            err := custom.Unmarshal([]byte(tok.unquoted))
+            if err != nil {
+                return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+            }
+            v.Set(reflect.ValueOf(custom))
+        } else {
+            custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+            err := custom.Unmarshal([]byte(tok.unquoted))
+            if err != nil {
+                return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+            }
+            v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+        }
+        return nil
+    }
+    if props.StdTime {
+        fv := v
+        p.back()
+        props.StdTime = false
+        tproto := &timestamp{}
+        err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+        props.StdTime = true
+        if err != nil {
+            return err
+        }
+        tim, err := timestampFromProto(tproto)
+        if err != nil {
+            return err
+        }
+        if props.Repeated {
+            t := reflect.TypeOf(v.Interface())
+            if t.Kind() == reflect.Slice {
+                if t.Elem().Kind() == reflect.Ptr {
+                    ts := fv.Interface().([]*time.Time)
+                    ts = append(ts, &tim)
+                    fv.Set(reflect.ValueOf(ts))
+                    return nil
+                } else {
+                    ts := fv.Interface().([]time.Time)
+                    ts = append(ts, tim)
+                    fv.Set(reflect.ValueOf(ts))
+                    return nil
+                }
+            }
+        }
+        if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+            v.Set(reflect.ValueOf(&tim))
+        } else {
+            v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+        }
+        return nil
+    }
+    if props.StdDuration {
+        fv := v
+        p.back()
+        props.StdDuration = false
+        dproto := &duration{}
+        err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+        props.StdDuration = true
+        if err != nil {
+            return err
+        }
+        dur, err := durationFromProto(dproto)
+        if err != nil {
+            return err
+        }
+        if props.Repeated {
+            t := reflect.TypeOf(v.Interface())
+            if t.Kind() == reflect.Slice {
+                if t.Elem().Kind() == reflect.Ptr {
+                    ds := fv.Interface().([]*time.Duration)
+                    ds = append(ds, &dur)
+                    fv.Set(reflect.ValueOf(ds))
+                    return nil
+                } else {
+                    ds := fv.Interface().([]time.Duration)
+                    ds = append(ds, dur)
+                    fv.Set(reflect.ValueOf(ds))
+                    return nil
+                }
+            }
+        }
+        if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+            v.Set(reflect.ValueOf(&dur))
+        } else {
+            v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+        }
+        return nil
+    }
+    switch fv := v; fv.Kind() {
+    case reflect.Slice:
+        at := v.Type()
+        if at.Elem().Kind() == reflect.Uint8 {
+            // Special case for []byte
+            if tok.value[0] != '"' && tok.value[0] != '\'' {
+                // Deliberately written out here, as the error after
+                // this switch statement would write "invalid []byte: ...",
+                // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 00000000000..9324f6542bc --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
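+// For instance, a Timestamp with Seconds = 253402300800 (10000-01-01) is a
+// representable time.Time but is rejected below because Seconds must stay
+// strictly below maxValidSeconds.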
+func validateTimestamp(ts *timestamp) error {
+    if ts == nil {
+        return errors.New("timestamp: nil Timestamp")
+    }
+    if ts.Seconds < minValidSeconds {
+        return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+    }
+    if ts.Seconds >= maxValidSeconds {
+        return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+    }
+    if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+        return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+    }
+    return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first
+// return value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+    // Don't return the zero value on error, because it corresponds to a valid
+    // timestamp. Instead return whatever time.Unix gives us.
+    var t time.Time
+    if ts == nil {
+        t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+    } else {
+        t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+    }
+    return t, validateTimestamp(ts)
+}
+
+// timestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+    seconds := t.Unix()
+    nanos := int32(t.Sub(time.Unix(seconds, 0)))
+    ts := &timestamp{
+        Seconds: seconds,
+        Nanos:   nanos,
+    }
+    if err := validateTimestamp(ts); err != nil {
+        return nil, err
+    }
+    return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 00000000000..38439fa9901
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 00000000000..b175d1b6423 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
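+        // At this point b carries the field's wire tag (varint), a length
+        // varint, and the encoded google.protobuf.DoubleValue payload, i.e.
+        // the same length-delimited framing as any embedded message.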
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 00000000000..c1cf7bf85e9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 00000000000..ceadde6a5e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git 
a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/.gitignore new file mode 100644 index 00000000000..09573e0169c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin +.idea/ + diff --git a/vendor/github.com/golang-jwt/jwt/.travis.yml b/vendor/github.com/golang-jwt/jwt/.travis.yml new file mode 100644 index 00000000000..036a862f87f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/.travis.yml @@ -0,0 +1,11 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - 1.7 + - 1.8 + - 1.9 + - 1.10 diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/LICENSE new file mode 100644 index 00000000000..35dbc252041 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md new file mode 100644 index 00000000000..c4efbd2a8c5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md @@ -0,0 +1,22 @@ +## Migration Guide (v3.2.1) + +Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will use the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, whereas new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. + +### go.mod replacement + +As a first step, the easiest way is to use `go mod edit` to issue a replacement. + +``` +go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible +go mod tidy +``` + +This keeps the old import path in your code but resolves it to the new package, and it also introduces a new indirect dependency on `github.com/golang-jwt/jwt`. Try to compile your project; it should still work. + +### Cleanup + +Once your code builds consistently, you can replace all occurrences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed.
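For illustration, a minimal sketch of the end state after the cleanup step; the file and secret below are placeholders, but the import path and calls are the published `golang-jwt/jwt` v3 API:

```go
package main

import (
	"fmt"

	// After cleanup, every occurrence of the old import path
	// "github.com/dgrijalva/jwt-go" has been rewritten to the new one.
	jwt "github.com/golang-jwt/jwt"
)

func main() {
	// The package API is unchanged by the migration; only the path differs.
	token := jwt.New(jwt.SigningMethodHS256)
	fmt.Println(token.Method.Alg()) // "HS256"
}
```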
+ +## Older releases (before v3.2.0) + +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. \ No newline at end of file diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/README.md new file mode 100644 index 00000000000..13c31c09b6f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/README.md @@ -0,0 +1,106 @@ +# jwt-go + +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt) + +A [Go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). + +**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. + +Future releases will use the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, whereas new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.8.3. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring that key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
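As a quick sketch of that round trip, signing a token with HMAC and then parsing and verifying it (the secret and claim values here are placeholders):

```go
package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt"
)

func main() {
	secret := []byte("placeholder-secret") // any []byte works as an HMAC key

	// Generation and signing.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"sub":   "1234567890",
		"admin": true,
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parsing and verification. Per the security notice above, the
	// Keyfunc checks the signing method before handing back the key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Valid, parsed.Claims.(jwt.MapClaims)["sub"])
}
```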
+ +## Examples + +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: + +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing with respect to semantic versioning. + +**BREAKING CHANGES:** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter.
Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing methods](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing methods](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing methods](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
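To make the key-type expectations above concrete, a small sketch; the `key.pem` path is a placeholder, while `ParseRSAPrivateKeyFromPEM` is one of the helpers the package exposes:

```go
package main

import (
	"io/ioutil"

	jwt "github.com/golang-jwt/jwt"
)

func main() {
	// HMAC methods take raw []byte secrets for both signing and validation.
	hmacToken := jwt.New(jwt.SigningMethodHS256)
	_, _ = hmacToken.SignedString([]byte("shared-secret"))

	// RSA methods take *rsa.PrivateKey for signing; helpers are provided
	// for loading keys from PEM. "key.pem" is a placeholder path.
	pemBytes, err := ioutil.ReadFile("key.pem")
	if err != nil {
		panic(err)
	}
	rsaKey, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	rsaToken := jwt.New(jwt.SigningMethodRS256)
	_, _ = rsaToken.SignedString(rsaKey)
}
```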
diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
new file mode 100644
index 00000000000..dac737bcaa4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
@@ -0,0 +1,124 @@
+## `jwt-go` Version History
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
+
+#### 3.1.0
+
+* Improvements to the `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+  * `ParseFromRequest` has been moved to the `request` subpackage and its usage has changed
+  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+  * Added `Claims` interface type to allow users to decode the claims into a custom type
+  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+  * Added `ParseFromRequestWithClaims`, which is the `FromRequest` equivalent of `ParseWithClaims`
+  * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+  * Added several new, more specific, validation errors to the error type bitmask
+  * Moved examples from the README to executable example files
+  * Signing method registry is now thread safe
+  * Added a new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or the json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed the inner error within `ValidationError`
+* Fixed validation errors when using the `UseJSONNumber` flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added a more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added a new type, `Parser`, to allow for configuration of various parsing parameters
+  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. The result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodRS256`
+  * Added public package global `SigningMethodRS384`
+  * Added public package global `SigningMethodRS512`
+* Moved the sample private key for HMAC tests from an inline value to a file on disk. The value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed a bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go new file mode 100644 index 00000000000..f1dba3cb916 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/claims.go @@ -0,0 +1,146 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/doc.go new file mode 100644 index 00000000000..a86dc1a3b34 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go new file mode 100644 index 00000000000..d310af1c7c0 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the 
signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
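+		// Note: JWS serializes ECDSA signatures as this raw, fixed-width
+		// r||s concatenation (RFC 7518, section 3.4), rather than the
+		// ASN.1/DER encoding used in many other contexts; Verify above
+		// splits the signature in half to recover r and s accordingly.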
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go new file mode 100644 index 00000000000..db9f4be7d8e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go new file mode 100644 index 00000000000..1c93024aad2 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. 
see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go new file mode 100644 index 00000000000..addbe5d4018 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go new file mode 100644 index 00000000000..ba290f429af --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/map_claims.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case string: + aud = append(aud, v) + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + } + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return !req +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return !req +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return !req +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
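+// For example, a token whose decoded claims are MapClaims{"exp": 1516239022}
+// fails this check once TimeFunc().Unix() passes that timestamp, while a
+// token with no "exp" claim at all still passes; callers that want "exp" to
+// be mandatory should call VerifyExpiresAt(now, true) themselves.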
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/none.go new file mode 100644 index 00000000000..f04d189d067 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go new file mode 100644 index 00000000000..d6901d9adb5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
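+// (Commonly the callback inspects token.Method or the token's "kid" header
+// to decide which verification key to return.)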
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
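+//
+// An illustrative use is peeking at an unverified token to select a key:
+//
+//	tok, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
+//	if err == nil {
+//		kid, _ := tok.Header["kid"].(string)
+//		// look up the verification key for kid, then call Parse as usual
+//	}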
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go new file mode 100644 index 00000000000..e4caf1ca4a1 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. 
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go new file mode 100644 index 00000000000..c0147086480 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/rsa_pss.go @@ -0,0 +1,142 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. 
+var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go new file mode 100644 index 00000000000..14c78c292a9 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) 
(*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/signing_method.go new file mode 100644 index 00000000000..ed1f212b21e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go new file mode 100644 index 00000000000..99868d29b9f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff642057..066b4323b49 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go index 4a593100987..47eb3e44501 100644 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) { if i > utf8.MaxRune { return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) } - return string(i), s, nil + return string(rune(i)), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index 63dc0578514..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto - -package descriptor - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/descriptor.proto. 
- -type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type - -const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE -const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT -const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 -const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 -const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 -const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 -const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 -const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL -const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING -const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP -const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE -const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES -const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 -const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM -const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 -const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 -const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 -const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 - -var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name -var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value - -type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label - -const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL -const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED -const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED - -var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name -var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value - -type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode - -const FileOptions_SPEED = descriptorpb.FileOptions_SPEED -const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE -const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME - -var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name -var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value - -type FieldOptions_CType = descriptorpb.FieldOptions_CType - -const FieldOptions_STRING = descriptorpb.FieldOptions_STRING -const FieldOptions_CORD = descriptorpb.FieldOptions_CORD -const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE - -var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name -var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value - -type FieldOptions_JSType = descriptorpb.FieldOptions_JSType - -const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL -const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING -const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER - -var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name -var 
FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value - -type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel - -const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN -const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS -const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT - -var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name -var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value - -type FileDescriptorSet = descriptorpb.FileDescriptorSet -type FileDescriptorProto = descriptorpb.FileDescriptorProto -type DescriptorProto = descriptorpb.DescriptorProto -type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions -type FieldDescriptorProto = descriptorpb.FieldDescriptorProto -type OneofDescriptorProto = descriptorpb.OneofDescriptorProto -type EnumDescriptorProto = descriptorpb.EnumDescriptorProto -type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto -type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto -type MethodDescriptorProto = descriptorpb.MethodDescriptorProto - -const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming -const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming - -type FileOptions = descriptorpb.FileOptions - -const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles -const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 -const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor -const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices -const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices -const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices -const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices -const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated -const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas - -type MessageOptions = descriptorpb.MessageOptions - -const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat -const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor -const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated - -type FieldOptions = descriptorpb.FieldOptions - -const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype -const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype -const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy -const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated -const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak - -type OneofOptions = descriptorpb.OneofOptions -type EnumOptions = descriptorpb.EnumOptions - -const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated - -type EnumValueOptions = descriptorpb.EnumValueOptions - -const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated - -type ServiceOptions = 
descriptorpb.ServiceOptions - -const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated - -type MethodOptions = descriptorpb.MethodOptions - -const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated -const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel - -type UninterpretedOption = descriptorpb.UninterpretedOption -type SourceCodeInfo = descriptorpb.SourceCodeInfo -type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo -type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange -type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange -type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange -type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart -type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location -type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation - -var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ - 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, - 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x32, -} - -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } -func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { - if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, - DependencyIndexes: 
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil - file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff13c..85f9f57365f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c627..d3c33259d28 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. 
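The deprecation notes being added to any.go above each name a direct replacement on the anypb type itself. As a quick orientation, here is a minimal, hedged sketch of the suggested migration; the wrapperspb payload is only an illustrative stand-in and is not part of this change:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello") // illustrative payload, not from this diff

	// anypb.New replaces ptypes.MarshalAny.
	a, err := anypb.New(msg)
	if err != nil {
		panic(err)
	}

	// a.MessageIs replaces ptypes.Is.
	fmt.Println(a.MessageIs(msg)) // true

	// a.UnmarshalTo replaces ptypes.UnmarshalAny.
	out := &wrapperspb.StringValue{}
	if err := a.UnmarshalTo(out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue()) // hello
}
```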
+// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a41d..b2b55dd851f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go deleted file mode 100644 index 16686a65523..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/empty/empty.proto - -package empty - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/empty.proto. 
- -type Empty = emptypb.Empty - -var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } -func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { - if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File - file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d49155..8368a3f70d3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. 
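The duration.go and timestamp.go hunks follow the same pattern as any.go: each deprecated helper points at a method or constructor on the well-known type itself. A minimal sketch of the replacements named in these comments (values are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// durationpb.New replaces ptypes.DurationProto; AsDuration and
	// CheckValid together replace ptypes.Duration.
	dur := durationpb.New(90 * time.Second)
	if err := dur.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(dur.AsDuration()) // 1m30s

	// timestamppb.Now replaces ptypes.TimestampNow, and ts.AsTime
	// (plus Format) replaces ptypes.Timestamp / ptypes.TimestampString.
	ts := timestamppb.Now()
	fmt.Println(ts.AsTime().Format(time.RFC3339))
}
```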
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/gomodule/redigo/LICENSE b/vendor/github.com/gomodule/redigo/LICENSE new file mode 100644 index 00000000000..f433b1a53f5 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gomodule/redigo/redis/commandinfo.go b/vendor/github.com/gomodule/redigo/redis/commandinfo.go new file mode 100644 index 00000000000..b6df6a25aa3 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/commandinfo.go @@ -0,0 +1,55 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "strings" +) + +const ( + connectionWatchState = 1 << iota + connectionMultiState + connectionSubscribeState + connectionMonitorState +) + +type commandInfo struct { + // Set or Clear these states on connection. 
+ Set, Clear int +} + +var commandInfos = map[string]commandInfo{ + "WATCH": {Set: connectionWatchState}, + "UNWATCH": {Clear: connectionWatchState}, + "MULTI": {Set: connectionMultiState}, + "EXEC": {Clear: connectionWatchState | connectionMultiState}, + "DISCARD": {Clear: connectionWatchState | connectionMultiState}, + "PSUBSCRIBE": {Set: connectionSubscribeState}, + "SUBSCRIBE": {Set: connectionSubscribeState}, + "MONITOR": {Set: connectionMonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func lookupCommandInfo(commandName string) commandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go new file mode 100644 index 00000000000..7d757dced33 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/conn.go @@ -0,0 +1,798 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/url" + "regexp" + "strconv" + "sync" + "time" +) + +var ( + _ ConnWithTimeout = (*conn)(nil) +) + +// conn is the low-level implementation of Conn +type conn struct { + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +// +// Deprecated: Use Dial with options instead. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + return Dial(network, address, + DialConnectTimeout(connectTimeout), + DialReadTimeout(readTimeout), + DialWriteTimeout(writeTimeout)) +} + +// DialOption specifies an option for dialing a Redis server. +type DialOption struct { + f func(*dialOptions) +} + +type dialOptions struct { + readTimeout time.Duration + writeTimeout time.Duration + tlsHandshakeTimeout time.Duration + dialer *net.Dialer + dialContext func(ctx context.Context, network, addr string) (net.Conn, error) + db int + username string + password string + clientName string + useTLS bool + skipVerify bool + tlsConfig *tls.Config +} + +// DialTLSHandshakeTimeout specifies the maximum amount of time waiting to +// wait for a TLS handshake. Zero means no timeout. +// If no DialTLSHandshakeTimeout option is specified then the default is 30 seconds. 
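These DialOption values compose at the Dial call site; each one just mutates the shared dialOptions struct before the connection is made. A hedged usage sketch (the address and option values are placeholders, not part of this diff):

```go
package main

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379", // placeholder address
		redis.DialConnectTimeout(5*time.Second),
		redis.DialReadTimeout(time.Second),
		redis.DialWriteTimeout(time.Second),
		redis.DialDatabase(1),
		redis.DialClientName("example-client"),
	)
	if err != nil {
		panic(err)
	}
	defer c.Close()
}
```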
+func DialTLSHandshakeTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.tlsHandshakeTimeout = d + }} +} + +// DialReadTimeout specifies the timeout for reading a single command reply. +func DialReadTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.readTimeout = d + }} +} + +// DialWriteTimeout specifies the timeout for writing a single command. +func DialWriteTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.writeTimeout = d + }} +} + +// DialConnectTimeout specifies the timeout for connecting to the Redis server when +// no DialNetDial option is specified. +// If no DialConnectTimeout option is specified then the default is 30 seconds. +func DialConnectTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer.Timeout = d + }} +} + +// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server +// when no DialNetDial option is specified. +// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then +// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected. +func DialKeepAlive(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer.KeepAlive = d + }} +} + +// DialNetDial specifies a custom dial function for creating TCP +// connections, otherwise a net.Dialer customized via the other options is used. +// DialNetDial overrides DialConnectTimeout and DialKeepAlive. +func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { + return DialOption{func(do *dialOptions) { + do.dialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return dial(network, addr) + } + }} +} + +// DialContextFunc specifies a custom dial function with context for creating TCP +// connections, otherwise a net.Dialer customized via the other options is used. +// DialContextFunc overrides DialConnectTimeout and DialKeepAlive. +func DialContextFunc(f func(ctx context.Context, network, addr string) (net.Conn, error)) DialOption { + return DialOption{func(do *dialOptions) { + do.dialContext = f + }} +} + +// DialDatabase specifies the database to select when dialing a connection. +func DialDatabase(db int) DialOption { + return DialOption{func(do *dialOptions) { + do.db = db + }} +} + +// DialPassword specifies the password to use when connecting to +// the Redis server. +func DialPassword(password string) DialOption { + return DialOption{func(do *dialOptions) { + do.password = password + }} +} + +// DialUsername specifies the username to use when connecting to +// the Redis server when Redis ACLs are used. +// A DialPassword must also be passed otherwise this option will have no effect. +func DialUsername(username string) DialOption { + return DialOption{func(do *dialOptions) { + do.username = username + }} +} + +// DialClientName specifies a client name to be used +// by the Redis server connection. +func DialClientName(name string) DialOption { + return DialOption{func(do *dialOptions) { + do.clientName = name + }} +} + +// DialTLSConfig specifies the config to use when a TLS connection is dialed. +// Has no effect when not dialing a TLS connection. +func DialTLSConfig(c *tls.Config) DialOption { + return DialOption{func(do *dialOptions) { + do.tlsConfig = c + }} +} + +// DialTLSSkipVerify disables server name verification when connecting over +// TLS. Has no effect when not dialing a TLS connection. 
+func DialTLSSkipVerify(skip bool) DialOption { + return DialOption{func(do *dialOptions) { + do.skipVerify = skip + }} +} + +// DialUseTLS specifies whether TLS should be used when connecting to the +// server. This option is ignore by DialURL. +func DialUseTLS(useTLS bool) DialOption { + return DialOption{func(do *dialOptions) { + do.useTLS = useTLS + }} +} + +// Dial connects to the Redis server at the given network and +// address using the specified options. +func Dial(network, address string, options ...DialOption) (Conn, error) { + return DialContext(context.Background(), network, address, options...) +} + +type tlsHandshakeTimeoutError struct{} + +func (tlsHandshakeTimeoutError) Timeout() bool { return true } +func (tlsHandshakeTimeoutError) Temporary() bool { return true } +func (tlsHandshakeTimeoutError) Error() string { return "TLS handshake timeout" } + +// DialContext connects to the Redis server at the given network and +// address using the specified options and context. +func DialContext(ctx context.Context, network, address string, options ...DialOption) (Conn, error) { + do := dialOptions{ + dialer: &net.Dialer{ + Timeout: time.Second * 30, + KeepAlive: time.Minute * 5, + }, + tlsHandshakeTimeout: time.Second * 10, + } + for _, option := range options { + option.f(&do) + } + if do.dialContext == nil { + do.dialContext = do.dialer.DialContext + } + + netConn, err := do.dialContext(ctx, network, address) + if err != nil { + return nil, err + } + + if do.useTLS { + var tlsConfig *tls.Config + if do.tlsConfig == nil { + tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify} + } else { + tlsConfig = cloneTLSConfig(do.tlsConfig) + } + if tlsConfig.ServerName == "" { + host, _, err := net.SplitHostPort(address) + if err != nil { + netConn.Close() + return nil, err + } + tlsConfig.ServerName = host + } + + tlsConn := tls.Client(netConn, tlsConfig) + errc := make(chan error, 2) // buffered so we don't block timeout or Handshake + if d := do.tlsHandshakeTimeout; d != 0 { + timer := time.AfterFunc(d, func() { + errc <- tlsHandshakeTimeoutError{} + }) + defer timer.Stop() + } + go func() { + errc <- tlsConn.Handshake() + }() + if err := <-errc; err != nil { + // Timeout or Handshake error. + netConn.Close() // nolint: errcheck + return nil, err + } + + netConn = tlsConn + } + + c := &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: do.readTimeout, + writeTimeout: do.writeTimeout, + } + + if do.password != "" { + authArgs := make([]interface{}, 0, 2) + if do.username != "" { + authArgs = append(authArgs, do.username) + } + authArgs = append(authArgs, do.password) + if _, err := c.Do("AUTH", authArgs...); err != nil { + netConn.Close() + return nil, err + } + } + + if do.clientName != "" { + if _, err := c.Do("CLIENT", "SETNAME", do.clientName); err != nil { + netConn.Close() + return nil, err + } + } + + if do.db != 0 { + if _, err := c.Do("SELECT", do.db); err != nil { + netConn.Close() + return nil, err + } + } + + return c, nil +} + +var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) + +// DialURL connects to a Redis server at the given URL using the Redis +// URI scheme. URLs should follow the draft IANA specification for the +// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). 
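As a usage note for the DialURL function defined just below: the scheme, user-info, and path map onto the dial options above, and a rediss:// scheme enables TLS. A minimal sketch with a placeholder URL:

```go
package main

import "github.com/gomodule/redigo/redis"

func main() {
	// redis:// picks host, port, optional username:password, and the
	// database number from the URL path; rediss:// additionally
	// enables TLS (equivalent to DialUseTLS(true)).
	c, err := redis.DialURL("redis://user:secret@localhost:6379/2") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer c.Close()
}
```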
+func DialURL(rawurl string, options ...DialOption) (Conn, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + + if u.Scheme != "redis" && u.Scheme != "rediss" { + return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) + } + + if u.Opaque != "" { + return nil, fmt.Errorf("invalid redis URL, url is opaque: %s", rawurl) + } + + // As per the IANA draft spec, the host defaults to localhost and + // the port defaults to 6379. + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // assume port is missing + host = u.Host + port = "6379" + } + if host == "" { + host = "localhost" + } + address := net.JoinHostPort(host, port) + + if u.User != nil { + password, isSet := u.User.Password() + username := u.User.Username() + if isSet { + if username != "" { + // ACL + options = append(options, DialUsername(username), DialPassword(password)) + } else { + // requirepass - user-info username:password with blank username + options = append(options, DialPassword(password)) + } + } else if username != "" { + // requirepass - redis-cli compatibility which treats as single arg in user-info as a password + options = append(options, DialPassword(username)) + } + } + + match := pathDBRegexp.FindStringSubmatch(u.Path) + if len(match) == 2 { + db := 0 + if len(match[1]) > 0 { + db, err = strconv.Atoi(match[1]) + if err != nil { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + } + if db != 0 { + options = append(options, DialDatabase(db)) + } + } else if u.Path != "" { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + + options = append(options, DialUseTLS(u.Scheme == "rediss")) + + return Dial("tcp", address, options...) +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. 
+ c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + if err := c.writeLen('$', len(s)); err != nil { + return err + } + if _, err := c.bw.WriteString(s); err != nil { + return err + } + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + if err := c.writeLen('$', len(p)); err != nil { + return err + } + if _, err := c.bw.Write(p); err != nil { + return err + } + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) error { + if err := c.writeLen('*', 1+len(args)); err != nil { + return err + } + if err := c.writeString(cmd); err != nil { + return err + } + for _, arg := range args { + if err := c.writeArg(arg, true); err != nil { + return err + } + } + return nil +} + +func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) { + switch arg := arg.(type) { + case string: + return c.writeString(arg) + case []byte: + return c.writeBytes(arg) + case int: + return c.writeInt64(int64(arg)) + case int64: + return c.writeInt64(arg) + case float64: + return c.writeFloat64(arg) + case bool: + if arg { + return c.writeString("1") + } else { + return c.writeString("0") + } + case nil: + return c.writeString("") + case Argument: + if argumentTypeOK { + return c.writeArg(arg.RedisArg(), false) + } + // See comment in default clause below. + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + return c.writeBytes(buf.Bytes()) + default: + // This default clause is intended to handle builtin numeric types. + // The function should return an error for other types, but this is not + // done for compatibility with previous versions of the package. + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + return c.writeBytes(buf.Bytes()) + } +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +// readLine reads a line of input from the RESP stream. +func (c *conn) readLine() ([]byte, error) { + // To avoid allocations, attempt to read the line using ReadSlice. This + // call typically succeeds. The known case where the call fails is when + // reading the output from the MONITOR command. + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + // The line does not fit in the bufio.Reader's buffer. Fall back to + // allocating a buffer for the line. + buf := append([]byte{}, p...) + for err == bufio.ErrBufferFull { + p, err = c.br.ReadSlice('\n') + buf = append(buf, p...) + } + p = buf + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. 
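For readers unfamiliar with RESP, the framing that writeCommand and writeLen produce (and that parseLen later decodes) is easy to reproduce by hand. This standalone sketch mirrors the encoding for illustration only; encode is not an API of this package:

```go
package main

import (
	"fmt"
	"strings"
)

// encode mirrors conn.writeCommand: an array header "*<n>\r\n"
// followed by one bulk string "$<len>\r\n<bytes>\r\n" per element.
func encode(args ...string) string {
	var b strings.Builder
	fmt.Fprintf(&b, "*%d\r\n", len(args))
	for _, a := range args {
		fmt.Fprintf(&b, "$%d\r\n%s\r\n", len(a), a)
	}
	return b.String()
}

func main() {
	fmt.Printf("%q\n", encode("SET", "foo", "bar"))
	// Output: "*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
	// The server's "+OK\r\n" reply is what readReply's '+' branch parses.
}
```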
+func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. +func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch string(line[1:]) { + case "OK": + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case "PONG": + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(line[1:]), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)); err != nil { + return c.fatal(err) + } + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)); err != nil { + return c.fatal(err) + } + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (interface{}, error) { + return c.ReceiveWithTimeout(c.readTimeout) +} + +func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { + var deadline time.Time + if timeout != 0 { + deadline = time.Now().Add(timeout) + } + if err := c.conn.SetReadDeadline(deadline); err != nil { + return nil, c.fatal(err) + } + + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. 
+ // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. + c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + return c.DoWithTimeout(c.readTimeout, cmd, args...) +} + +func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)); err != nil { + return nil, c.fatal(err) + } + } + + if cmd != "" { + if err := c.writeCommand(cmd, args); err != nil { + return nil, c.fatal(err) + } + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + var deadline time.Time + if readTimeout != 0 { + deadline = time.Now().Add(readTimeout) + } + if err := c.conn.SetReadDeadline(deadline); err != nil { + return nil, c.fatal(err) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/vendor/github.com/gomodule/redigo/redis/doc.go b/vendor/github.com/gomodule/redigo/redis/doc.go new file mode 100644 index 00000000000..69ad506cd3a --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/doc.go @@ -0,0 +1,177 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redis is a client for the Redis database. +// +// The Redigo FAQ (https://github.com/gomodule/redigo/wiki/FAQ) contains more +// documentation about this package. +// +// Connections +// +// The Conn interface is the primary interface for working with Redis. +// Applications create connections by calling the Dial, DialWithTimeout or +// NewConn functions. In the future, functions will be added for creating +// sharded and other types of connections. +// +// The application must call the connection Close method when the application +// is done with the connection. +// +// Executing Commands +// +// The Conn interface has a generic method for executing Redis commands: +// +// Do(commandName string, args ...interface{}) (reply interface{}, err error) +// +// The Redis command reference (http://redis.io/commands) lists the available +// commands. 
An example of using the Redis APPEND command is: +// +// n, err := conn.Do("APPEND", "key", "value") +// +// The Do method converts command arguments to bulk strings for transmission +// to the server as follows: +// +// Go Type Conversion +// []byte Sent as is +// string Sent as is +// int, int64 strconv.FormatInt(v) +// float64 strconv.FormatFloat(v, 'g', -1, 64) +// bool true -> "1", false -> "0" +// nil "" +// all other types fmt.Fprint(w, v) +// +// Redis command reply types are represented using the following Go types: +// +// Redis type Go type +// error redis.Error +// integer int64 +// simple string string +// bulk string []byte or nil if value not present. +// array []interface{} or nil if value not present. +// +// Use type assertions or the reply helper functions to convert from +// interface{} to the specific Go type for the command result. +// +// Pipelining +// +// Connections support pipelining using the Send, Flush and Receive methods. +// +// Send(commandName string, args ...interface{}) error +// Flush() error +// Receive() (reply interface{}, err error) +// +// Send writes the command to the connection's output buffer. Flush flushes the +// connection's output buffer to the server. Receive reads a single reply from +// the server. The following example shows a simple pipeline. +// +// c.Send("SET", "foo", "bar") +// c.Send("GET", "foo") +// c.Flush() +// c.Receive() // reply from SET +// v, err = c.Receive() // reply from GET +// +// The Do method combines the functionality of the Send, Flush and Receive +// methods. The Do method starts by writing the command and flushing the output +// buffer. Next, the Do method receives all pending replies including the reply +// for the command just sent by Do. If any of the received replies is an error, +// then Do returns the error. If there are no errors, then Do returns the last +// reply. If the command argument to the Do method is "", then the Do method +// will flush the output buffer and receive pending replies without sending a +// command. +// +// Use the Send and Do methods to implement pipelined transactions. +// +// c.Send("MULTI") +// c.Send("INCR", "foo") +// c.Send("INCR", "bar") +// r, err := c.Do("EXEC") +// fmt.Println(r) // prints [1, 1] +// +// Concurrency +// +// Connections support one concurrent caller to the Receive method and one +// concurrent caller to the Send and Flush methods. No other concurrency is +// supported including concurrent calls to the Do and Close methods. +// +// For full concurrent access to Redis, use the thread-safe Pool to get, use +// and release a connection from within a goroutine. Connections returned from +// a Pool have the concurrency restrictions described in the previous +// paragraph. +// +// Publish and Subscribe +// +// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. +// +// c.Send("SUBSCRIBE", "example") +// c.Flush() +// for { +// reply, err := c.Receive() +// if err != nil { +// return err +// } +// // process pushed message +// } +// +// The PubSubConn type wraps a Conn with convenience methods for implementing +// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods +// send and flush a subscription management command. The receive method +// converts a pushed message to convenient types for use in a type switch. 
+// +// psc := redis.PubSubConn{Conn: c} +// psc.Subscribe("example") +// for { +// switch v := psc.Receive().(type) { +// case redis.Message: +// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) +// case redis.Subscription: +// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) +// case error: +// return v +// } +// } +// +// Reply Helpers +// +// The Bool, Int, Bytes, String, Strings and Values functions convert a reply +// to a value of a specific type. To allow convenient wrapping of calls to the +// connection Do and Receive methods, the functions take a second argument of +// type error. If the error is non-nil, then the helper function returns the +// error. If the error is nil, the function converts the reply to the specified +// type: +// +// exists, err := redis.Bool(c.Do("EXISTS", "foo")) +// if err != nil { +// // handle error return from c.Do or type conversion error. +// } +// +// The Scan function converts elements of a array reply to Go types: +// +// var value1 int +// var value2 string +// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) +// if err != nil { +// // handle error +// } +// if _, err := redis.Scan(reply, &value1, &value2); err != nil { +// // handle error +// } +// +// Errors +// +// Connection methods return error replies from the server as type redis.Error. +// +// Call the connection Err() method to determine if the connection encountered +// non-recoverable error such as a network error or protocol parsing error. If +// Err() returns a non-nil value, then the connection is not usable and should +// be closed. +package redis diff --git a/vendor/github.com/gomodule/redigo/redis/go17.go b/vendor/github.com/gomodule/redigo/redis/go17.go new file mode 100644 index 00000000000..5f36379113c --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/go17.go @@ -0,0 +1,29 @@ +// +build go1.7,!go1.8 + +package redis + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/github.com/gomodule/redigo/redis/go18.go b/vendor/github.com/gomodule/redigo/redis/go18.go new file mode 100644 index 00000000000..558363be39a --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/go18.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package redis + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + return cfg.Clone() +} diff --git a/vendor/github.com/gomodule/redigo/redis/log.go b/vendor/github.com/gomodule/redigo/redis/log.go new file mode 100644 index 00000000000..ef8cd7a0239 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/log.go @@ -0,0 +1,146 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" + "time" +) + +var ( + _ ConnWithTimeout = (*loggingConn)(nil) +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." + } + return &loggingConn{conn, logger, prefix, nil} +} + +//NewLoggingConnFilter returns a logging wrapper around a connection and a filter function. +func NewLoggingConnFilter(conn Conn, logger *log.Logger, prefix string, skip func(cmdName string) bool) Conn { + if prefix != "" { + prefix = prefix + "." + } + return &loggingConn{conn, logger, prefix, skip} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string + skip func(cmdName string) bool +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) // nolint: errcheck + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + if c.skip != nil && c.skip(commandName) { + return + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) // nolint: errcheck +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) { + reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...) + c.print("DoWithTimeout", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) 
+ c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} + +func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) { + reply, err := ReceiveWithTimeout(c.Conn, timeout) + c.print("ReceiveWithTimeout", "", nil, reply, err) + return reply, err +} diff --git a/vendor/github.com/gomodule/redigo/redis/pool.go b/vendor/github.com/gomodule/redigo/redis/pool.go new file mode 100644 index 00000000000..c7a2f19435b --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/pool.go @@ -0,0 +1,636 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" +) + +var ( + _ ConnWithTimeout = (*activeConn)(nil) + _ ConnWithTimeout = (*errorConn)(nil) +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. +var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. The +// application creates a pool at application startup and makes it available to +// request handlers using a package level variable. The pool configuration used +// here is an example, not a recommendation. +// +// func newPool(addr string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// // Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial. +// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// ... +// } +// +// Use the Dial function to authenticate connections with the AUTH command or +// select a database with the SELECT command: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. 
+// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// if _, err := c.Do("SELECT", db); err != nil { +// c.Close() +// return nil, err +// } +// return c, nil +// }, +// } +// +// Use the TestOnBorrow function to check the health of an idle connection +// before the connection is returned to the application. This example PINGs +// connections that have been idle more than a minute: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// if time.Since(t) < time.Minute { +// return nil +// } +// _, err := c.Do("PING") +// return err +// }, +// } +// +type Pool struct { + // Dial is an application supplied function for creating and configuring a + // connection. + // + // The connection returned from Dial must not be in a special state + // (subscribed to pubsub channel, transaction started, ...). + Dial func() (Conn, error) + + // DialContext is an application supplied function for creating and configuring a + // connection with the given context. + // + // The connection returned from Dial must not be in a special state + // (subscribed to pubsub channel, transaction started, ...). + DialContext func(ctx context.Context) (Conn, error) + + // TestOnBorrow is an optional application supplied function for checking + // the health of an idle connection before the connection is used again by + // the application. Argument t is the time that the connection was returned + // to the pool. If the function returns an error, then the connection is + // closed. + TestOnBorrow func(c Conn, t time.Time) error + + // Maximum number of idle connections in the pool. + MaxIdle int + + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + MaxActive int + + // Close connections after remaining idle for this duration. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + IdleTimeout time.Duration + + // If Wait is true and the pool is at the MaxActive limit, then Get() waits + // for a connection to be returned to the pool before returning. + Wait bool + + // Close connections older than this duration. If the value is zero, then + // the pool does not close connections based on age. + MaxConnLifetime time.Duration + + mu sync.Mutex // mu protects the following fields + closed bool // set to true when the pool is closed. + active int // the number of open connections in the pool + initOnce sync.Once // the init ch once func + ch chan struct{} // limits open connections when p.Wait is true + idle idleList // idle connections + waitCount int64 // total number of connections waited for. + waitDuration time.Duration // total time waited for new connections. +} + +// NewPool creates a new pool. +// +// Deprecated: Initialize the Pool directly as shown in the example. +func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { + return &Pool{Dial: newFn, MaxIdle: maxIdle} +} + +// Get gets a connection. The application must close the returned connection. +// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. 
If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + // GetContext returns errorConn in the first argument when an error occurs. + c, _ := p.GetContext(context.Background()) + return c +} + +// GetContext gets a connection using the provided context. +// +// The provided Context must be non-nil. If the context expires before the +// connection is complete, an error is returned. Any expiration on the context +// will not affect the returned connection. +// +// If the function completes without error, then the application must close the +// returned connection. +func (p *Pool) GetContext(ctx context.Context) (Conn, error) { + // Wait until there is a vacant connection in the pool. + waited, err := p.waitVacantConn(ctx) + if err != nil { + return errorConn{err}, err + } + + p.mu.Lock() + + if waited > 0 { + p.waitCount++ + p.waitDuration += waited + } + + // Prune stale connections at the back of the idle list. + if p.IdleTimeout > 0 { + n := p.idle.count + for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ { + pc := p.idle.back + p.idle.popBack() + p.mu.Unlock() + pc.c.Close() + p.mu.Lock() + p.active-- + } + } + + // Get idle connection from the front of idle list. + for p.idle.front != nil { + pc := p.idle.front + p.idle.popFront() + p.mu.Unlock() + if (p.TestOnBorrow == nil || p.TestOnBorrow(pc.c, pc.t) == nil) && + (p.MaxConnLifetime == 0 || nowFunc().Sub(pc.created) < p.MaxConnLifetime) { + return &activeConn{p: p, pc: pc}, nil + } + pc.c.Close() + p.mu.Lock() + p.active-- + } + + // Check for pool closed before dialing a new connection. + if p.closed { + p.mu.Unlock() + err := errors.New("redigo: get on closed pool") + return errorConn{err}, err + } + + // Handle limit for p.Wait == false. + if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive { + p.mu.Unlock() + return errorConn{ErrPoolExhausted}, ErrPoolExhausted + } + + p.active++ + p.mu.Unlock() + c, err := p.dial(ctx) + if err != nil { + p.mu.Lock() + p.active-- + if p.ch != nil && !p.closed { + p.ch <- struct{}{} + } + p.mu.Unlock() + return errorConn{err}, err + } + return &activeConn{p: p, pc: &poolConn{c: c, created: nowFunc()}}, nil +} + +// PoolStats contains pool statistics. +type PoolStats struct { + // ActiveCount is the number of connections in the pool. The count includes + // idle connections and connections in use. + ActiveCount int + // IdleCount is the number of idle connections in the pool. + IdleCount int + + // WaitCount is the total number of connections waited for. + // This value is currently not guaranteed to be 100% accurate. + WaitCount int64 + + // WaitDuration is the total time blocked waiting for a new connection. + // This value is currently not guaranteed to be 100% accurate. + WaitDuration time.Duration +} + +// Stats returns pool's statistics. +func (p *Pool) Stats() PoolStats { + p.mu.Lock() + stats := PoolStats{ + ActiveCount: p.active, + IdleCount: p.idle.count, + WaitCount: p.waitCount, + WaitDuration: p.waitDuration, + } + p.mu.Unlock() + + return stats +} + +// ActiveCount returns the number of connections in the pool. The count +// includes idle connections and connections in use. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// IdleCount returns the number of idle connections in the pool. 
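For orientation, a minimal editorial sketch (not part of the vendored file) of checking a connection out of the pool with a deadline, using the GetContext API defined above; the pool configuration and all names here are illustrative:

    package example

    import (
    	"context"
    	"time"

    	"github.com/gomodule/redigo/redis"
    )

    // ping checks a connection out of the pool, bounded by a one-second
    // deadline, and returns it to the pool via Close.
    func ping(pool *redis.Pool) error {
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()

    	// With Wait == true and MaxActive > 0, GetContext blocks until a
    	// connection is vacant or ctx expires.
    	conn, err := pool.GetContext(ctx)
    	if err != nil {
    		return err
    	}
    	defer conn.Close() // returns the connection to the pool

    	_, err = conn.Do("PING")
    	return err
    }
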
+func (p *Pool) IdleCount() int {
+	p.mu.Lock()
+	idle := p.idle.count
+	p.mu.Unlock()
+	return idle
+}
+
+// Close releases the resources used by the pool.
+func (p *Pool) Close() error {
+	p.mu.Lock()
+	if p.closed {
+		p.mu.Unlock()
+		return nil
+	}
+	p.closed = true
+	p.active -= p.idle.count
+	pc := p.idle.front
+	p.idle.count = 0
+	p.idle.front, p.idle.back = nil, nil
+	if p.ch != nil {
+		close(p.ch)
+	}
+	p.mu.Unlock()
+	for ; pc != nil; pc = pc.next {
+		pc.c.Close()
+	}
+	return nil
+}
+
+func (p *Pool) lazyInit() {
+	p.initOnce.Do(func() {
+		p.ch = make(chan struct{}, p.MaxActive)
+		if p.closed {
+			close(p.ch)
+		} else {
+			for i := 0; i < p.MaxActive; i++ {
+				p.ch <- struct{}{}
+			}
+		}
+	})
+}
+
+// waitVacantConn waits for a vacant connection in the pool if waiting is
+// enabled and the pool size is limited; otherwise it returns instantly.
+// If ctx expires before a connection is vacant, an error is returned.
+//
+// If no vacant connection was available right away, it returns the time spent
+// waiting for one to appear in the pool.
+func (p *Pool) waitVacantConn(ctx context.Context) (waited time.Duration, err error) {
+	if !p.Wait || p.MaxActive <= 0 {
+		// No wait or no connection limit.
+		return 0, nil
+	}
+
+	p.lazyInit()
+
+	// wait indicates whether we believe the receive will block, so it's not
+	// 100% accurate; for stats, however, it is good enough.
+	wait := len(p.ch) == 0
+	var start time.Time
+	if wait {
+		start = time.Now()
+	}
+
+	select {
+	case <-p.ch:
+		// Additionally check that context hasn't expired while we were waiting,
+		// because `select` picks a random `case` if several of them are "ready".
+		select {
+		case <-ctx.Done():
+			p.ch <- struct{}{}
+			return 0, ctx.Err()
+		default:
+		}
+	case <-ctx.Done():
+		return 0, ctx.Err()
+	}
+
+	if wait {
+		return time.Since(start), nil
+	}
+	return 0, nil
+}
+
+func (p *Pool) dial(ctx context.Context) (Conn, error) {
+	if p.DialContext != nil {
+		return p.DialContext(ctx)
+	}
+	if p.Dial != nil {
+		return p.Dial()
+	}
+	return nil, errors.New("redigo: must pass Dial or DialContext to pool")
+}
+
+func (p *Pool) put(pc *poolConn, forceClose bool) error {
+	p.mu.Lock()
+	if !p.closed && !forceClose {
+		pc.t = nowFunc()
+		p.idle.pushFront(pc)
+		if p.idle.count > p.MaxIdle {
+			pc = p.idle.back
+			p.idle.popBack()
+		} else {
+			pc = nil
+		}
+	}
+
+	if pc != nil {
+		p.mu.Unlock()
+		pc.c.Close()
+		p.mu.Lock()
+		p.active--
+	}
+
+	if p.ch != nil && !p.closed {
+		p.ch <- struct{}{}
+	}
+	p.mu.Unlock()
+	return nil
+}
+
+type activeConn struct {
+	p     *Pool
+	pc    *poolConn
+	state int
+}
+
+var (
+	sentinel     []byte
+	sentinelOnce sync.Once
+)
+
+func initSentinel() {
+	p := make([]byte, 64)
+	if _, err := rand.Read(p); err == nil {
+		sentinel = p
+	} else {
+		h := sha1.New()
+		io.WriteString(h, "Oops, rand failed. 
Use time instead.") // nolint: errcheck + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) // nolint: errcheck + sentinel = h.Sum(nil) + } +} + +func (ac *activeConn) firstError(errs ...error) error { + for _, err := range errs[:len(errs)-1] { + if err != nil { + return err + } + } + return errs[len(errs)-1] +} + +func (ac *activeConn) Close() (err error) { + pc := ac.pc + if pc == nil { + return nil + } + ac.pc = nil + + if ac.state&connectionMultiState != 0 { + err = pc.c.Send("DISCARD") + ac.state &^= (connectionMultiState | connectionWatchState) + } else if ac.state&connectionWatchState != 0 { + err = pc.c.Send("UNWATCH") + ac.state &^= connectionWatchState + } + if ac.state&connectionSubscribeState != 0 { + err = ac.firstError(err, + pc.c.Send("UNSUBSCRIBE"), + pc.c.Send("PUNSUBSCRIBE"), + ) + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + err = ac.firstError(err, + pc.c.Send("ECHO", sentinel), + pc.c.Flush(), + ) + for { + p, err2 := pc.c.Receive() + if err2 != nil { + err = ac.firstError(err, err2) + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + ac.state &^= connectionSubscribeState + break + } + } + } + _, err2 := pc.c.Do("") + return ac.firstError( + err, + err2, + ac.p.put(pc, ac.state != 0 || pc.c.Err() != nil), + ) +} + +func (ac *activeConn) Err() error { + pc := ac.pc + if pc == nil { + return errConnClosed + } + return pc.c.Err() +} + +func (ac *activeConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + ci := lookupCommandInfo(commandName) + ac.state = (ac.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (ac *activeConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + cwt, ok := pc.c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + ci := lookupCommandInfo(commandName) + ac.state = (ac.state | ci.Set) &^ ci.Clear + return cwt.DoWithTimeout(timeout, commandName, args...) +} + +func (ac *activeConn) Send(commandName string, args ...interface{}) error { + pc := ac.pc + if pc == nil { + return errConnClosed + } + ci := lookupCommandInfo(commandName) + ac.state = (ac.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) 
+} + +func (ac *activeConn) Flush() error { + pc := ac.pc + if pc == nil { + return errConnClosed + } + return pc.c.Flush() +} + +func (ac *activeConn) Receive() (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + return pc.c.Receive() +} + +func (ac *activeConn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + cwt, ok := pc.c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.ReceiveWithTimeout(timeout) +} + +type errorConn struct{ err error } + +func (ec errorConn) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConn) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) { + return nil, ec.err +} +func (ec errorConn) Send(string, ...interface{}) error { return ec.err } +func (ec errorConn) Err() error { return ec.err } +func (ec errorConn) Close() error { return nil } +func (ec errorConn) Flush() error { return ec.err } +func (ec errorConn) Receive() (interface{}, error) { return nil, ec.err } +func (ec errorConn) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } + +type idleList struct { + count int + front, back *poolConn +} + +type poolConn struct { + c Conn + t time.Time + created time.Time + next, prev *poolConn +} + +func (l *idleList) pushFront(pc *poolConn) { + pc.next = l.front + pc.prev = nil + if l.count == 0 { + l.back = pc + } else { + l.front.prev = pc + } + l.front = pc + l.count++ +} + +func (l *idleList) popFront() { + pc := l.front + l.count-- + if l.count == 0 { + l.front, l.back = nil, nil + } else { + pc.next.prev = nil + l.front = pc.next + } + pc.next, pc.prev = nil, nil +} + +func (l *idleList) popBack() { + pc := l.back + l.count-- + if l.count == 0 { + l.front, l.back = nil, nil + } else { + pc.prev.next = nil + l.back = pc.prev + } + pc.next, pc.prev = nil, nil +} diff --git a/vendor/github.com/gomodule/redigo/redis/pubsub.go b/vendor/github.com/gomodule/redigo/redis/pubsub.go new file mode 100644 index 00000000000..cc58575760a --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/pubsub.go @@ -0,0 +1,158 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "time" +) + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. +type Message struct { + // The originating channel. + Channel string + + // The matched pattern, if any + Pattern string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. 
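For orientation, an editorial sketch (not part of the vendored file) of the canonical subscriber loop built on the PubSubConn wrapper defined below: Receive is called in a loop and its result dispatched with a type switch over the Subscription, Message, and error cases; the channel name and helper names are illustrative:

    package example

    import (
    	"fmt"

    	"github.com/gomodule/redigo/redis"
    )

    // listen subscribes c to a channel and forwards notifications until an
    // error (for example, a closed connection) ends the loop.
    func listen(c redis.Conn, done chan<- error) {
    	psc := redis.PubSubConn{Conn: c}
    	if err := psc.Subscribe("events"); err != nil {
    		done <- err
    		return
    	}
    	for {
    		switch v := psc.Receive().(type) {
    		case redis.Message:
    			fmt.Printf("%s: message %s\n", v.Channel, v.Data)
    		case redis.Subscription:
    			fmt.Printf("%s: %s (count %d)\n", v.Channel, v.Kind, v.Count)
    		case error:
    			done <- v
    			return
    		}
    	}
    }
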
+type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + if err := c.Conn.Send("SUBSCRIBE", channel...); err != nil { + return err + } + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + if err := c.Conn.Send("PSUBSCRIBE", channel...); err != nil { + return err + } + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + if err := c.Conn.Send("UNSUBSCRIBE", channel...); err != nil { + return err + } + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + if err := c.Conn.Send("PUNSUBSCRIBE", channel...); err != nil { + return err + } + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +// +// The connection must be subscribed to at least one channel or pattern when +// calling this method. +func (c PubSubConn) Ping(data string) error { + if err := c.Conn.Send("PING", data); err != nil { + return err + } + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, Pong or error. +// The return value is intended to be used directly in a type switch as +// illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + return c.receiveInternal(c.Conn.Receive()) +} + +// ReceiveWithTimeout is like Receive, but it allows the application to +// override the connection's default timeout. +func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} { + return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout)) +} + +func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} { + reply, err := Values(replyArg, errArg) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var m Message + if _, err := Scan(reply, &m.Pattern, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + case "pong": + var p Pong + if _, err := Scan(reply, &p.Data); err != nil { + return err + } + return p + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/vendor/github.com/gomodule/redigo/redis/redis.go b/vendor/github.com/gomodule/redigo/redis/redis.go new file mode 100644 index 00000000000..e4464874471 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/redis.go @@ -0,0 +1,138 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"time"
+)
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+	// Close closes the connection.
+	Close() error
+
+	// Err returns a non-nil value when the connection is not usable.
+	Err() error
+
+	// Do sends a command to the server and returns the received reply.
+	Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+	// Send writes the command to the client's output buffer.
+	Send(commandName string, args ...interface{}) error
+
+	// Flush flushes the output buffer to the Redis server.
+	Flush() error
+
+	// Receive receives a single reply from the Redis server.
+	Receive() (reply interface{}, err error)
+}
+
+// Argument is the interface implemented by an object which wants to control how
+// the object is converted to Redis bulk strings.
+type Argument interface {
+	// RedisArg returns a value to be encoded as a bulk string per the
+	// conversions listed in the section 'Executing Commands'.
+	// Implementations should typically return a []byte or string.
+	RedisArg() interface{}
+}
+
+// Scanner is implemented by an object which wants to control how its value is
+// interpreted when read from Redis.
+type Scanner interface {
+	// RedisScan assigns a value from a Redis value. The argument src is one of
+	// the reply types listed in the section `Executing Commands`.
+	//
+	// An error should be returned if the value cannot be stored without
+	// loss of information.
+	RedisScan(src interface{}) error
+}
+
+// ConnWithTimeout is an optional interface that allows the caller to override
+// a connection's default read timeout. This interface is useful for executing
+// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the
+// server.
+//
+// A connection's default read timeout is set with the DialReadTimeout dial
+// option. Applications should rely on the default timeout for commands that do
+// not block at the server.
+//
+// All of the Conn implementations in this package satisfy the ConnWithTimeout
+// interface.
+//
+// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify
+// use of this interface.
+type ConnWithTimeout interface {
+	Conn
+
+	// Do sends a command to the server and returns the received reply.
+	// The timeout overrides the read timeout set when dialing the
+	// connection.
+	DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error)
+
+	// Receive receives a single reply from the Redis server. The timeout
+	// overrides the read timeout set when dialing the connection.
+	ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error)
+}
+
+var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout")
+
+// DoWithTimeout executes a Redis command with the specified read timeout. If
+// the connection does not satisfy the ConnWithTimeout interface, then an error
+// is returned. 
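For orientation, an editorial sketch (not part of the vendored file) of the DoWithTimeout helper documented above, applied to a server-side blocking command; the key name and both timeout values are illustrative, and the per-call read timeout is chosen to exceed the BLPOP timeout argument:

    package example

    import (
    	"time"

    	"github.com/gomodule/redigo/redis"
    )

    // pop performs a BLPOP that blocks server-side for up to 3 seconds while
    // allowing the client read to wait up to 5 seconds.
    func pop(c redis.Conn) ([]string, error) {
    	reply, err := redis.DoWithTimeout(c, 5*time.Second, "BLPOP", "jobs", 3)
    	if err != nil {
    		return nil, err
    	}
    	// BLPOP replies with [key, value]; a nil reply (server-side timeout)
    	// surfaces here as redis.ErrNil.
    	return redis.Strings(reply, nil)
    }
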
+func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+	cwt, ok := c.(ConnWithTimeout)
+	if !ok {
+		return nil, errTimeoutNotSupported
+	}
+	return cwt.DoWithTimeout(timeout, cmd, args...)
+}
+
+// ReceiveWithTimeout receives a reply with the specified read timeout. If the
+// connection does not satisfy the ConnWithTimeout interface, then an error is
+// returned.
+func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) {
+	cwt, ok := c.(ConnWithTimeout)
+	if !ok {
+		return nil, errTimeoutNotSupported
+	}
+	return cwt.ReceiveWithTimeout(timeout)
+}
+
+// SlowLog represents a Redis slow log entry.
+type SlowLog struct {
+	// ID is a unique progressive identifier for every slow log entry.
+	ID int64
+
+	// Time is the unix timestamp at which the logged command was processed.
+	Time time.Time
+
+	// ExecutionTime is the amount of time needed for the command execution.
+	ExecutionTime time.Duration
+
+	// Args is the command name and arguments.
+	Args []string
+
+	// ClientAddr is the client IP address (4.0 only).
+	ClientAddr string
+
+	// ClientName is the name set via the CLIENT SETNAME command (4.0 only).
+	ClientName string
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/reply.go b/vendor/github.com/gomodule/redigo/redis/reply.go
new file mode 100644
index 00000000000..dfe6aff7938
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/reply.go
@@ -0,0 +1,583 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+//  Reply type    Result
+//  integer       int(reply), nil
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Int(reply interface{}, err error) (int, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		x := int(reply)
+		if int64(x) != reply {
+			return 0, strconv.ErrRange
+		}
+		return x, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 0)
+		return int(n), err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to 64 bit integer. If err is
+// not equal to nil, then Int64 returns 0, err. 
Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int64(reply interface{}, err error) (int64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + return reply, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) +} + +func errNegativeInt(v int64) error { + return fmt.Errorf("redigo: unexpected negative value %v for Uint64", v) +} + +// Uint64 is a helper that converts a command reply to 64 bit unsigned integer. +// If err is not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the +// reply to an uint64 as follows: +// +// Reply type Result +// +integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Uint64(reply interface{}, err error) (uint64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + if reply < 0 { + return 0, errNegativeInt(reply) + } + return uint64(reply), nil + case []byte: + n, err := strconv.ParseUint(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) +} + +// Float64 is a helper that converts a command reply to 64 bit float. If err is +// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts +// the reply to a float64 as follows: +// +// Reply type Result +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Float64(reply interface{}, err error) (float64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case []byte: + n, err := strconv.ParseFloat(string(reply), 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) +} + +// String is a helper that converts a command reply to a string. If err is not +// equal to nil, then String returns "", err. Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. 
Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+//  Reply type      Result
+//  bulk string     reply, nil
+//  simple string   []byte(reply), nil
+//  nil             nil, ErrNil
+//  other           nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return reply, nil
+	case string:
+		return []byte(reply), nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to boolean as follows:
+//
+//  Reply type      Result
+//  integer         value != 0, nil
+//  bulk string     strconv.ParseBool(reply)
+//  nil             false, ErrNil
+//  other           false, error
+func Bool(reply interface{}, err error) (bool, error) {
+	if err != nil {
+		return false, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply != 0, nil
+	case []byte:
+		return strconv.ParseBool(string(reply))
+	case nil:
+		return false, ErrNil
+	case Error:
+		return false, reply
+	}
+	return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+//  Reply type      Result
+//  array           reply, nil
+//  nil             nil, ErrNil
+//  other           nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		return reply, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error {
+	if err != nil {
+		return err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		makeSlice(len(reply))
+		for i := range reply {
+			if reply[i] == nil {
+				continue
+			}
+			if err := assign(i, reply[i]); err != nil {
+				return err
+			}
+		}
+		return nil
+	case nil:
+		return ErrNil
+	case Error:
+		return reply
+	}
+	return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply)
+}
+
+// Float64s is a helper that converts an array command reply to a []float64. If
+// err is not equal to nil, then Float64s returns nil, err. Nil array items are
+// converted to 0 in the output slice. Float64s returns an error if an array
+// item is not a bulk string or nil.
+func Float64s(reply interface{}, err error) ([]float64, error) {
+	var result []float64
+	err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error {
+		p, ok := v.([]byte)
+		if !ok {
+			return fmt.Errorf("redigo: unexpected element type for Float64s, got type %T", v)
+		}
+		f, err := strconv.ParseFloat(string(p), 64)
+		result[i] = f
+		return err
+	})
+	return result, err
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. 
Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+	var result []string
+	err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error {
+		switch v := v.(type) {
+		case string:
+			result[i] = v
+			return nil
+		case []byte:
+			result[i] = string(v)
+			return nil
+		default:
+			return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v)
+		}
+	})
+	return result, err
+}
+
+// ByteSlices is a helper that converts an array command reply to a [][]byte.
+// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
+// items stay nil. ByteSlices returns an error if an array item is not a
+// bulk string or nil.
+func ByteSlices(reply interface{}, err error) ([][]byte, error) {
+	var result [][]byte
+	err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error {
+		p, ok := v.([]byte)
+		if !ok {
+			return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v)
+		}
+		result[i] = p
+		return nil
+	})
+	return result, err
+}
+
+// Int64s is a helper that converts an array command reply to a []int64.
+// If err is not equal to nil, then Int64s returns nil, err. Nil array
+// items stay nil. Int64s returns an error if an array item is not a
+// bulk string or nil.
+func Int64s(reply interface{}, err error) ([]int64, error) {
+	var result []int64
+	err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error {
+		switch v := v.(type) {
+		case int64:
+			result[i] = v
+			return nil
+		case []byte:
+			n, err := strconv.ParseInt(string(v), 10, 64)
+			result[i] = n
+			return err
+		default:
+			return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v)
+		}
+	})
+	return result, err
+}
+
+// Ints is a helper that converts an array command reply to a []int.
+// If err is not equal to nil, then Ints returns nil, err. Nil array
+// items stay nil. Ints returns an error if an array item is not a
+// bulk string or nil.
+func Ints(reply interface{}, err error) ([]int, error) {
+	var result []int
+	err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error {
+		switch v := v.(type) {
+		case int64:
+			n := int(v)
+			if int64(n) != v {
+				return strconv.ErrRange
+			}
+			result[i] = n
+			return nil
+		case []byte:
+			n, err := strconv.Atoi(string(v))
+			result[i] = n
+			return err
+		default:
+			return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v)
+		}
+	})
+	return result, err
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result. 
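For orientation, an editorial sketch (not part of the vendored file) showing the map helpers in use; StringMap (defined below) is the usual way to decode an HGETALL reply, and the key and function names are illustrative:

    package example

    import "github.com/gomodule/redigo/redis"

    // loadProfile reads every field of a hash into a map. HGETALL returns
    // alternating field names and values; StringMap folds them into a map
    // and errors on an odd-length reply.
    func loadProfile(c redis.Conn, key string) (map[string]string, error) {
    	return redis.StringMap(c.Do("HGETALL", key))
    }
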
+func StringMap(result interface{}, err error) (map[string]string, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: StringMap expects even number of values result")
+	}
+	m := make(map[string]string, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, okKey := values[i].([]byte)
+		value, okValue := values[i+1].([]byte)
+		if !okKey || !okValue {
+			return nil, errors.New("redigo: StringMap key not a bulk string value")
+		}
+		m[string(key)] = string(value)
+	}
+	return m, nil
+}
+
+// IntMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func IntMap(result interface{}, err error) (map[string]int, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: IntMap expects even number of values result")
+	}
+	m := make(map[string]int, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, ok := values[i].([]byte)
+		if !ok {
+			return nil, errors.New("redigo: IntMap key not a bulk string value")
+		}
+		value, err := Int(values[i+1], nil)
+		if err != nil {
+			return nil, err
+		}
+		m[string(key)] = value
+	}
+	return m, nil
+}
+
+// Int64Map is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int64. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func Int64Map(result interface{}, err error) (map[string]int64, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: Int64Map expects even number of values result")
+	}
+	m := make(map[string]int64, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, ok := values[i].([]byte)
+		if !ok {
+			return nil, errors.New("redigo: Int64Map key not a bulk string value")
+		}
+		value, err := Int64(values[i+1], nil)
+		if err != nil {
+			return nil, err
+		}
+		m[string(key)] = value
+	}
+	return m, nil
+}
+
+// Positions is a helper that converts an array of positions (lat, long)
+// into a []*[2]float64. The GEOPOS command returns replies in this format.
+func Positions(result interface{}, err error) ([]*[2]float64, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	positions := make([]*[2]float64, len(values))
+	for i := range values {
+		if values[i] == nil {
+			continue
+		}
+		p, ok := values[i].([]interface{})
+		if !ok {
+			return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i])
+		}
+		if len(p) != 2 {
+			return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p))
+		}
+		lat, err := Float64(p[0], nil)
+		if err != nil {
+			return nil, err
+		}
+		long, err := Float64(p[1], nil)
+		if err != nil {
+			return nil, err
+		}
+		positions[i] = &[2]float64{lat, long}
+	}
+	return positions, nil
+}
+
+// Uint64s is a helper that converts an array command reply to a []uint64.
+// If err is not equal to nil, then Uint64s returns nil, err. Nil array
+// items stay nil. Uint64s returns an error if an array item is not a
+// bulk string or nil. 
+func Uint64s(reply interface{}, err error) ([]uint64, error) {
+	var result []uint64
+	err = sliceHelper(reply, err, "Uint64s", func(n int) { result = make([]uint64, n) }, func(i int, v interface{}) error {
+		switch v := v.(type) {
+		case uint64:
+			result[i] = v
+			return nil
+		case []byte:
+			n, err := strconv.ParseUint(string(v), 10, 64)
+			result[i] = n
+			return err
+		default:
+			return fmt.Errorf("redigo: unexpected element type for Uint64s, got type %T", v)
+		}
+	})
+	return result, err
+}
+
+// Uint64Map is a helper that converts an array of strings (alternating key, value)
+// into a map[string]uint64. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func Uint64Map(result interface{}, err error) (map[string]uint64, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: Uint64Map expects even number of values result")
+	}
+	m := make(map[string]uint64, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, ok := values[i].([]byte)
+		if !ok {
+			return nil, errors.New("redigo: Uint64Map key not a bulk string value")
+		}
+		value, err := Uint64(values[i+1], nil)
+		if err != nil {
+			return nil, err
+		}
+		m[string(key)] = value
+	}
+	return m, nil
+}
+
+// SlowLogs is a helper that parses the SLOWLOG GET command output and
+// returns an array of SlowLog entries.
+func SlowLogs(result interface{}, err error) ([]SlowLog, error) {
+	rawLogs, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	logs := make([]SlowLog, len(rawLogs))
+	for i, rawLog := range rawLogs {
+		rawLog, ok := rawLog.([]interface{})
+		if !ok {
+			return nil, errors.New("redigo: slowlog element is not an array")
+		}
+
+		var log SlowLog
+
+		if len(rawLog) < 4 {
+			return nil, errors.New("redigo: slowlog element has fewer than four elements")
+		}
+		log.ID, ok = rawLog[0].(int64)
+		if !ok {
+			return nil, errors.New("redigo: slowlog element[0] not an int64")
+		}
+		timestamp, ok := rawLog[1].(int64)
+		if !ok {
+			return nil, errors.New("redigo: slowlog element[1] not an int64")
+		}
+		log.Time = time.Unix(timestamp, 0)
+		duration, ok := rawLog[2].(int64)
+		if !ok {
+			return nil, errors.New("redigo: slowlog element[2] not an int64")
+		}
+		log.ExecutionTime = time.Duration(duration) * time.Microsecond
+
+		log.Args, err = Strings(rawLog[3], nil)
+		if err != nil {
+			return nil, fmt.Errorf("redigo: slowlog element[3] is not an array of strings: %s", err.Error())
+		}
+		if len(rawLog) >= 6 {
+			log.ClientAddr, err = String(rawLog[4], nil)
+			if err != nil {
+				return nil, fmt.Errorf("redigo: slowlog element[4] is not a string: %s", err.Error())
+			}
+			log.ClientName, err = String(rawLog[5], nil)
+			if err != nil {
+				return nil, fmt.Errorf("redigo: slowlog element[5] is not a string: %s", err.Error())
+			}
+		}
+		logs[i] = log
+	}
+	return logs, nil
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go
new file mode 100644
index 00000000000..379206edea5
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/scan.go
@@ -0,0 +1,683 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+var (
+	scannerType = reflect.TypeOf((*Scanner)(nil)).Elem()
+)
+
+func ensureLen(d reflect.Value, n int) {
+	if n > d.Cap() {
+		d.Set(reflect.MakeSlice(d.Type(), n, n))
+	} else {
+		d.SetLen(n)
+	}
+}
+
+func cannotConvert(d reflect.Value, s interface{}) error {
+	var sname string
+	switch s.(type) {
+	case string:
+		sname = "Redis simple string"
+	case Error:
+		sname = "Redis error"
+	case int64:
+		sname = "Redis integer"
+	case []byte:
+		sname = "Redis bulk string"
+	case []interface{}:
+		sname = "Redis array"
+	case nil:
+		sname = "Redis nil"
+	default:
+		sname = reflect.TypeOf(s).String()
+	}
+	return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
+}
+
+func convertAssignNil(d reflect.Value) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Slice, reflect.Interface:
+		d.Set(reflect.Zero(d.Type()))
+	default:
+		err = cannotConvert(d, nil)
+	}
+	return err
+}
+
+func convertAssignError(d reflect.Value, s Error) (err error) {
+	if d.Kind() == reflect.String {
+		d.SetString(string(s))
+	} else if d.Kind() == reflect.Slice && d.Type().Elem().Kind() == reflect.Uint8 {
+		d.SetBytes([]byte(s))
+	} else {
+		err = cannotConvert(d, s)
+	}
+	return
+}
+
+func convertAssignString(d reflect.Value, s string) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Float32, reflect.Float64:
+		var x float64
+		x, err = strconv.ParseFloat(s, d.Type().Bits())
+		d.SetFloat(x)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var x int64
+		x, err = strconv.ParseInt(s, 10, d.Type().Bits())
+		d.SetInt(x)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		var x uint64
+		x, err = strconv.ParseUint(s, 10, d.Type().Bits())
+		d.SetUint(x)
+	case reflect.Bool:
+		var x bool
+		x, err = strconv.ParseBool(s)
+		d.SetBool(x)
+	case reflect.String:
+		d.SetString(s)
+	case reflect.Slice:
+		if d.Type().Elem().Kind() == reflect.Uint8 {
+			d.SetBytes([]byte(s))
+		} else {
+			err = cannotConvert(d, s)
+		}
+	case reflect.Ptr:
+		err = convertAssignString(d.Elem(), s)
+	default:
+		err = cannotConvert(d, s)
+	}
+	return
+}
+
+func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Slice:
+		// Handle []byte destination here to avoid unnecessary
+		// []byte -> string -> []byte conversion. 
+ if d.Type().Elem().Kind() == reflect.Uint8 { + d.SetBytes(s) + } else { + err = cannotConvert(d, s) + } + case reflect.Ptr: + if d.CanInterface() && d.CanSet() { + if s == nil { + if d.IsNil() { + return nil + } + + d.Set(reflect.Zero(d.Type())) + return nil + } + + if d.IsNil() { + d.Set(reflect.New(d.Type().Elem())) + } + + if sc, ok := d.Interface().(Scanner); ok { + return sc.RedisScan(s) + } + } + err = convertAssignString(d, string(s)) + default: + err = convertAssignString(d, string(s)) + } + return err +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + if d.Kind() != reflect.Ptr { + if d.CanAddr() { + d2 := d.Addr() + if d2.CanInterface() { + if scanner, ok := d2.Interface().(Scanner); ok { + return scanner.RedisScan(s) + } + } + } + } else if d.CanInterface() { + // Already a reflect.Ptr + if d.IsNil() { + d.Set(reflect.New(d.Type().Elem())) + } + if scanner, ok := d.Interface().(Scanner); ok { + return scanner.RedisScan(s) + } + } + + switch s := s.(type) { + case nil: + err = convertAssignNil(d) + case []byte: + err = convertAssignBulkString(d, s) + case int64: + err = convertAssignInt(d, s) + case string: + err = convertAssignString(d, s) + case Error: + err = convertAssignError(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignArray(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + if scanner, ok := d.(Scanner); ok { + return scanner.RedisScan(s) + } + + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. 
+ switch s := s.(type) { + case nil: + // ignore + case []byte: + switch d := d.(type) { + case *string: + *d = string(s) + case *int: + *d, err = strconv.Atoi(string(s)) + case *bool: + *d, err = strconv.ParseBool(string(s)) + case *[]byte: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignBulkString(d.Elem(), s) + } + } + case int64: + switch d := d.(type) { + case *int: + x := int(s) + if int64(x) != s { + err = strconv.ErrRange + x = 0 + } + *d = x + case *bool: + *d = s != 0 + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignInt(d.Elem(), s) + } + } + case string: + switch d := d.(type) { + case *string: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + case []interface{}: + switch d := d.(type) { + case *[]interface{}: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignArray(d.Elem(), s) + } + } + case Error: + err = s + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + return +} + +// Scan copies from src to the values pointed at by dest. +// +// Scan uses RedisScan if available otherwise: +// +// The values pointed at by dest must be an integer, float, boolean, string, +// []byte, interface{} or slices of these types. Scan uses the standard strconv +// package to convert bulk strings to numeric and boolean types. +// +// If a dest value is nil, then the corresponding src value is skipped. +// +// If a src element is nil, then the corresponding dest value is not modified. +// +// To enable easy use of Scan in a loop, Scan returns the slice of src +// following the copied values. +func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { + if len(src) < len(dest) { + return nil, errors.New("redigo.Scan: array short") + } + var err error + for i, d := range dest { + err = convertAssign(d, src[i]) + if err != nil { + err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) + break + } + } + return src[len(dest):], err +} + +type fieldSpec struct { + name string + index []int + omitEmpty bool +} + +type structSpec struct { + m map[string]*fieldSpec + l []*fieldSpec +} + +func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { + return ss.m[string(name)] +} + +func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { +LOOP: + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + switch { + case f.PkgPath != "" && !f.Anonymous: + // Ignore unexported fields. + case f.Anonymous: + switch f.Type.Kind() { + case reflect.Struct: + compileStructSpec(f.Type, depth, append(index, i), ss) + case reflect.Ptr: + // TODO(steve): Protect against infinite recursion. 
+				if f.Type.Elem().Kind() == reflect.Struct {
+					compileStructSpec(f.Type.Elem(), depth, append(index, i), ss)
+				}
+			}
+		default:
+			fs := &fieldSpec{name: f.Name}
+			tag := f.Tag.Get("redis")
+
+			var (
+				p string
+			)
+			first := true
+			for len(tag) > 0 {
+				i := strings.IndexByte(tag, ',')
+				if i < 0 {
+					p, tag = tag, ""
+				} else {
+					p, tag = tag[:i], tag[i+1:]
+				}
+				if p == "-" {
+					continue LOOP
+				}
+				if first && len(p) > 0 {
+					fs.name = p
+					first = false
+				} else {
+					switch p {
+					case "omitempty":
+						fs.omitEmpty = true
+					default:
+						panic(fmt.Errorf("redigo: unknown field tag %s for type %s", p, t.Name()))
+					}
+				}
+			}
+			d, found := depth[fs.name]
+			if !found {
+				d = 1 << 30
+			}
+			switch {
+			case len(index) == d:
+				// At same depth, remove from result.
+				delete(ss.m, fs.name)
+				j := 0
+				for i := 0; i < len(ss.l); i++ {
+					if fs.name != ss.l[i].name {
+						ss.l[j] = ss.l[i]
+						j++
+					}
+				}
+				ss.l = ss.l[:j]
+			case len(index) < d:
+				fs.index = make([]int, len(index)+1)
+				copy(fs.index, index)
+				fs.index[len(index)] = i
+				depth[fs.name] = len(index)
+				ss.m[fs.name] = fs
+				ss.l = append(ss.l, fs)
+			}
+		}
+	}
+}
+
+var (
+	structSpecMutex sync.RWMutex
+	structSpecCache = make(map[reflect.Type]*structSpec)
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+	structSpecMutex.RLock()
+	ss, found := structSpecCache[t]
+	structSpecMutex.RUnlock()
+	if found {
+		return ss
+	}
+
+	structSpecMutex.Lock()
+	defer structSpecMutex.Unlock()
+	ss, found = structSpecCache[t]
+	if found {
+		return ss
+	}
+
+	ss = &structSpec{m: make(map[string]*fieldSpec)}
+	compileStructSpec(t, make(map[string]int), nil, ss)
+	structSpecCache[t] = ss
+	return ss
+}
+
+var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// the 'redis' field tag to override the name:
+//
+//	Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Each field uses RedisScan if available; otherwise:
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanStructValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Struct {
+		return errScanStructValue
+	}
+	ss := structSpecForType(d.Type())
+
+	if len(src)%2 != 0 {
+		return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
+	}
+
+	for i := 0; i < len(src); i += 2 {
+		s := src[i+1]
+		if s == nil {
+			continue
+		}
+		name, ok := src[i].([]byte)
+		if !ok {
+			return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
+		}
+		fs := ss.fieldSpec(name)
+		if fs == nil {
+			continue
+		}
+		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+			return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
+		}
+	}
+	return nil
+}
+
+var (
+	errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. 
+// +// If the target is a slice of types which implement Scanner then the custom +// RedisScan method is used otherwise the following rules apply: +// +// The elements in the dest slice must be integer, float, boolean, string, struct +// or pointer to struct values. +// +// Struct fields must be integer, float, boolean or string values. All struct +// fields are used unless a subset is specified using fieldNames. +func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + st := t + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct || st.Implements(scannerType) { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo.ScanSlice: no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo.ScanSlice: length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. 
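For orientation, an editorial sketch (not part of the vendored file) of the Args helper documented above, flattening a tagged struct into an HMSET argument list; the struct, tags, and key name are illustrative:

    package example

    import "github.com/gomodule/redigo/redis"

    type profile struct {
    	Name  string `redis:"name"`
    	Email string `redis:"email,omitempty"`
    	Age   int    `redis:"age"`
    }

    // saveProfile expands to: HMSET key name <Name> age <Age> [email <Email>];
    // the email pair is omitted when the field is empty.
    func saveProfile(c redis.Conn, key string, p profile) error {
    	_, err := c.Do("HMSET", redis.Args{}.Add(key).AddFlat(p)...)
    	return err
    }
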
+func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + if fs.omitEmpty { + var empty = false + switch fv.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + empty = fv.Len() == 0 + case reflect.Bool: + empty = !fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + empty = fv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + empty = fv.Uint() == 0 + case reflect.Float32, reflect.Float64: + empty = fv.Float() == 0 + case reflect.Interface, reflect.Ptr: + empty = fv.IsNil() + } + if empty { + continue + } + } + if arg, ok := fv.Interface().(Argument); ok { + args = append(args, fs.name, arg.RedisArg()) + } else if fv.Kind() == reflect.Ptr { + if !fv.IsNil() { + args = append(args, fs.name, fv.Elem().Interface()) + } + } else { + args = append(args, fs.name, fv.Interface()) + } + } + return args +} diff --git a/vendor/github.com/gomodule/redigo/redis/script.go b/vendor/github.com/gomodule/redigo/redis/script.go new file mode 100644 index 00000000000..d0cec1ed98e --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/script.go @@ -0,0 +1,91 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. 
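For orientation, an editorial sketch (not part of the vendored file) of the Script type defined below: keyCount 1 declares that the first Do argument populates KEYS[1], and Do transparently falls back from EVALSHA to EVAL when the script is not yet cached on the server; the script itself is illustrative:

    package example

    import "github.com/gomodule/redigo/redis"

    // getSet atomically swaps in a new value and returns the old one.
    var getSet = redis.NewScript(1, `
    	local old = redis.call("GET", KEYS[1])
    	redis.call("SET", KEYS[1], ARGV[1])
    	return old
    `)

    func swap(c redis.Conn, key, value string) (string, error) {
    	// A missing key yields a nil reply, which String reports as ErrNil.
    	return redis.String(getSet.Do(c, key, value))
    }
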
+func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) // nolint: errcheck + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Hash returns the script hash. +func (s *Script) Hash() string { + return s.hash +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. +func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml new file mode 100644 index 00000000000..4f2ee4d9733 --- /dev/null +++ b/vendor/github.com/google/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 00000000000..6062a4dacd4 --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 00000000000..b83acdbc6d3 --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,890 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. 
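A hedged sketch of the Item contract described above, using a hypothetical custom type (the vendored file itself only ships the Int helper shown near its end):

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

// person orders by age. Less must be a strict weak ordering: two items
// for which neither Less holds are treated as equal, and only one of
// them can live in the tree at a time.
type person struct {
	name string
	age  int
}

func (p person) Less(than btree.Item) bool {
	return p.age < than.(person).age
}

func main() {
	// New(32) allows up to 2*32-1 = 63 items per node (see maxItems below).
	tr := btree.New(32)
	tr.ReplaceOrInsert(person{name: "ada", age: 36})
	tr.ReplaceOrInsert(person{name: "grace", age: 85})

	// "bob" compares equal to "ada" (same age), so it replaces her and
	// the displaced item is returned.
	if old := tr.ReplaceOrInsert(person{name: "bob", age: 36}); old != nil {
		fmt.Println("replaced:", old) // replaced: {ada 36}
	}
	fmt.Println("len:", tr.Len()) // len: 2
}
```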
+package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 
'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. 
+func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
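A hedged illustration of these range semantics through the exported wrappers defined further down, using the Int helper from the end of this file:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}

	// AscendRange visits [greaterOrEqual, lessThan): prints 3 4 5 6.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Print(i, " ")
		return true // returning false stops the iteration early
	})
	fmt.Println()

	// DescendLessOrEqual visits [pivot, first] downward: prints 4 3 2 1 0.
	tr.DescendLessOrEqual(btree.Int(4), func(i btree.Item) bool {
		fmt.Print(i, " ")
		return true
	})
	fmt.Println()
}
```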
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. 
Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
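A hedged sketch of the copy-on-write behavior just described: Clone itself is cheap, and subsequent writes to either tree copy only the nodes they touch.

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(16)
	for i := 0; i < 1000; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}

	// Clone is O(1): the node set is shared and marked copy-on-write.
	t2 := t.Clone()

	// Each write copies only the path it modifies, so the other tree
	// never observes it.
	t.Delete(btree.Int(0))
	t2.ReplaceOrInsert(btree.Int(5000))

	fmt.Println(t.Len(), t2.Len()) // 999 1001
}
```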
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. 
+func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/google/btree/go.mod b/vendor/github.com/google/btree/go.mod new file mode 100644 index 00000000000..fe4d5ca17b3 --- /dev/null +++ b/vendor/github.com/google/btree/go.mod @@ -0,0 +1,17 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +module github.com/google/btree + +go 1.12 diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 580ae209782..86d0903b8b5 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package cmp determines equality of values. 
// @@ -95,13 +95,13 @@ func Equal(x, y interface{}, opts ...Option) bool { return s.result.Equal() } -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. // // The output is displayed as a literal in pseudo-Go syntax. // At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added to y, and the lack of a prefix +// a "+" prefix to indicates an element added from y, and the lack of a prefix // indicates an element common to both x and y. If possible, the output // uses fmt.Stringer.String or error.Error methods to produce more humanly // readable outputs. In such cases, the string is prefixed with either an diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index dfa5d213769..5ff0b4218c6 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build purego diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 351f1a34b46..21eb54858e0 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !purego diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index fe98dcc6774..1daaaacc5ee 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !cmp_debug diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 597b6ae56b1..4b91dbcacae 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. 
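A hedged sketch of the Diff contract documented in the hunk above, with a hypothetical config struct; the exact report layout varies between go-cmp versions, so only the empty-iff-Equal property is relied on:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type config struct {
	Host string
	Port int
}

func main() {
	x := config{Host: "localhost", Port: 8080}
	y := config{Host: "localhost", Port: 9090}

	// Diff returns "" if and only if Equal(x, y) with the same options.
	if diff := cmp.Diff(x, y); diff != "" {
		// "-" lines come from x, "+" lines from y.
		fmt.Printf("config mismatch (-x +y):\n%s", diff)
	}
}
```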
// +build cmp_debug diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 730e223ee7b..bc196b16cfa 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package diff implements an algorithm for producing edit-scripts. // The edit-script is a sequence of operations needed to transform one list @@ -119,7 +119,7 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } -var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. @@ -168,17 +168,6 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. - // To ensure flexibility in changing the algorithm in the future, - // introduce some degree of deliberate instability. - // This is achieved by fiddling the zigzag iterator to start searching - // the graph starting from the bottom-right versus than the top-left. - // The result may differ depending on the starting search location, - // but still produces a valid edit script. - zigzagInit := randInt // either 0 or 1 - if flags.Deterministic { - zigzagInit = 0 - } - // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -197,6 +186,11 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // approximately the square-root of the search budget. searchBudget := 4 * (nx + ny) // O(n) + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + // The algorithm below is a greedy, meet-in-the-middle algorithm for // computing sub-optimal edit-scripts between two lists. // @@ -214,22 +208,28 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // frontier towards the opposite corner. // • This algorithm terminates when either the X coordinates or the // Y coordinates of the forward and reverse frontier points ever intersect. - // + // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed // that two lists commonly differ because elements were added to the front // or end of the other list. // - // Running the tests with the "cmp_debug" build tag prints a visualization - // of the algorithm running in real-time. This is educational for - // understanding how the algorithm works. See debug_enable.go. - f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) - for { + // Non-deterministically start with either the forward or reverse direction + // to introduce some deliberate instability so that we have the flexibility + // to change this algorithm in the future. 
+ if flags.Deterministic || randBool { + goto forwardSearch + } else { + goto reverseSearch + } + +forwardSearch: + { // Forward search from the beginning. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break + goto finishSearch } - for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} @@ -262,10 +262,14 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { } else { fwdFrontier.Y++ } + goto reverseSearch + } +reverseSearch: + { // Reverse search from the end. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break + goto finishSearch } for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. @@ -300,8 +304,10 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { } else { revFrontier.Y-- } + goto forwardSearch } +finishSearch: // Join the forward and reverse paths and then append the reverse path. fwdPath.connect(revPath.point, f) for i := len(revPath.es) - 1; i >= 0; i-- { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go index a9e7fc0b5b3..d8e459c9b93 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package flags diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go index 01aed0a1532..82d1d7fbf8a 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !go1.10 diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go index c0b667f58b0..8646f052934 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build go1.10 diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index ace1dbe86e5..d127d436230 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. 
+// license that can be found in the LICENSE file. // Package function provides functionality for identifying function types. package function diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index 8228e7d512a..b6c12cefb47 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -1,6 +1,6 @@ // Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index e9e384a1c89..44f4a5afddc 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -1,6 +1,6 @@ // Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build purego diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index b50c17ec725..a605953d466 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -1,6 +1,6 @@ // Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !purego diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go index 24fbae6e3c5..98533b036cc 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go index 06a8ffd036d..9147a299731 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index abbd2a63b69..e57b9eb5392 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. 
package cmp @@ -225,11 +225,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" var name string if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } } else { // Unnamed type with unexported fields. Derive PkgPath from field. var pkgPath string diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 603dbb0026e..f01eff318c5 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans } // pops the address from the stack. Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection -// uses a seperate stack for the x and y values. +// uses a separate stack for the x and y values. // // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index aafcb363545..f43cd12eb5f 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 9e2180964f1..104bb30538b 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -79,7 +79,7 @@ func (opts formatOptions) verbosity() uint { } } -const maxVerbosityPreset = 3 +const maxVerbosityPreset = 6 // verbosityPreset modifies the verbosity settings given an index // between 0 and maxVerbosityPreset, inclusive. 
@@ -100,7 +100,7 @@ func verbosityPreset(opts formatOptions, i int) formatOptions { func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { if opts.DiffMode == diffIdentical { opts = opts.WithVerbosity(1) - } else { + } else if opts.verbosity() < 3 { opts = opts.WithVerbosity(3) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go index d620c2c20e7..be31b33a9e1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_references.go +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -1,6 +1,6 @@ // Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 786f671269c..33f03577f98 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -351,6 +351,8 @@ func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) s opts.PrintAddresses = disambiguate opts.AvoidStringer = disambiguate opts.QualifiedNames = disambiguate + opts.VerbosityLevel = maxVerbosityPreset + opts.LimitVerbosity = true s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 35315dad355..2ad3bc85ba8 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -1,12 +1,13 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp import ( "bytes" "fmt" + "math" "reflect" "strconv" "strings" @@ -26,8 +27,6 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // No differences detected case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid - case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): - return false // Both slice values have to be non-empty case v.NumIgnored > 0: return false // Some ignore option was used case v.NumTransformed > 0: @@ -45,7 +44,16 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } - switch t := v.Type; t.Kind() { + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { case reflect.String: case reflect.Array, reflect.Slice: // Only slices of primitive types have specialized handling. @@ -57,6 +65,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } + // Both slice values have to be non-empty. 
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + // If a sufficient number of elements already differ, // use specialized formatting even if length requirement is not met. if v.NumDiff > v.NumSame { @@ -68,7 +81,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { // Use specialized string diffing for longer slices or strings. const minLength = 64 - return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength + return vx.Len() >= minLength && vy.Len() >= minLength } // FormatDiffSlice prints a diff for the slices (or strings) represented by v. @@ -77,17 +90,23 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { assert(opts.DiffMode == diffUnknown) t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } // Auto-detect the type of the data. - var isLinedText, isText, isBinary bool var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() - isText = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isBinary = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() @@ -95,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { vy2.Set(vy) vx, vy = vx2, vy2 } - if isText || isBinary { - var numLines, lastLineIdx, maxLineLen int - isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { - if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { - isBinary = true - break + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { @@ -111,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { numLines++ } } - isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + isPureLinedText = efficiencyLines < 4*efficiencyBytes + } } // Format the string into printable records. 
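The replacement logic above first classifies the input (mostly text, pure multi-line text, or binary) and only then weighs a line-wise edit script against a byte-wise one before picking a rendering strategy. The following self-contained sketch mirrors the rune-counting half of that heuristic; it is slightly simplified (no math.Floor, and it scans a single string rather than sx + sy), and classify is a hypothetical name:

```go
package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// classify reports whether a string is "mostly text" (more than 90% of its
// runes are printable or whitespace) and "pure lined text" (every rune is
// valid, with at least 4 lines, none longer than 1024 bytes).
func classify(s string) (isMostlyText, isPureLinedText bool) {
	var numTotal, numValid, numLines, lastLineIdx, maxLineLen int
	for i, r := range s {
		numTotal++
		if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
			numValid++
		}
		if r == '\n' {
			if maxLineLen < i-lastLineIdx {
				maxLineLen = i - lastLineIdx
			}
			lastLineIdx = i + 1
			numLines++
		}
	}
	isPureText := numValid == numTotal
	isMostlyText = float64(numValid) > 0.90*float64(numTotal)
	isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
	return
}

func main() {
	fmt.Println(classify("a\nb\nc\nd\ne\n"))          // true true
	fmt.Println(classify("plain text, one line"))     // true false
	fmt.Println(classify("\x00\x01\x02\x03\x04\x05")) // false false
}
```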
@@ -121,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. - case isLinedText: - ssx := strings.Split(sx, "\n") - ssy := strings.Split(sy, "\n") + case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { @@ -212,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. - case isText: + case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { @@ -220,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s)} }, ) - delim = "" // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. @@ -282,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // Wrap the output with appropriate type information. var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isText { + if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). if t.Kind() == reflect.String { @@ -321,8 +354,11 @@ func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { - es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { - return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { @@ -347,6 +383,7 @@ func (opts formatOptions) formatDiffSlice( groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { @@ -399,25 +436,36 @@ func (opts formatOptions) formatDiffSlice( // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. 
+//
+// Example:
+//
+//	Input:  "..XXY...Y"
+//	Output: [
+//		{NumIdentical: 2},
+//		{NumRemoved: 2, NumInserted: 1},
+//		{NumIdentical: 3},
+//		{NumInserted: 1},
+//	]
+//
 func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
-	var prevCase int // Arbitrary index into which case last occurred
-	lastStats := func(i int) *diffStats {
-		if prevCase != i {
+	var prevMode byte
+	lastStats := func(mode byte) *diffStats {
+		if prevMode != mode {
 			groups = append(groups, diffStats{Name: name})
-			prevCase = i
+			prevMode = mode
 		}
 		return &groups[len(groups)-1]
 	}
 	for _, e := range es {
 		switch e {
 		case diff.Identity:
-			lastStats(1).NumIdentical++
+			lastStats('=').NumIdentical++
 		case diff.UniqueX:
-			lastStats(2).NumRemoved++
+			lastStats('!').NumRemoved++
 		case diff.UniqueY:
-			lastStats(2).NumInserted++
+			lastStats('!').NumInserted++
 		case diff.Modified:
-			lastStats(2).NumModified++
+			lastStats('!').NumModified++
 		}
 	}
 	return groups
@@ -427,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
 // equal groups into adjacent unequal groups that currently result in a
 // dual inserted/removed printout. This acts as a high-pass filter to smooth
 // out high-frequency changes within the windowSize.
+//
+// Example:
+//
+//	WindowSize: 16,
+//	Input: [
+//		{NumIdentical: 61},              // group 0
+//		{NumRemoved: 3, NumInserted: 1}, // group 1
+//		{NumIdentical: 6},               // ├── coalesce
+//		{NumInserted: 2},                // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 9},                 // └── coalesce
+//		{NumIdentical: 64},              // group 2
+//		{NumRemoved: 3, NumInserted: 1}, // group 3
+//		{NumIdentical: 6},               // ├── coalesce
+//		{NumInserted: 2},                // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 7},                 // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 2},                 // └── coalesce
+//		{NumIdentical: 63},              // group 4
+//	]
+//	Output: [
+//		{NumIdentical: 61},
+//		{NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+//		{NumIdentical: 64},
+//		{NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+//		{NumIdentical: 63},
+//	]
+//
 func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
 	groups, groupsOrig := groups[:0], groups
 	for i, ds := range groupsOrig {
@@ -446,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
 	}
 	return groups
 }
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+//	Input: [
+//		{NumIdentical: 61},
+//		{NumIdentical: 1, NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+//		{NumIdentical: 67},
+//		{NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+//		{NumIdentical: 54},
+//	]
+//	Output: [
+//		{NumIdentical: 64}, // incremented by 3
+//		{NumRemoved: 9},
+//		{NumIdentical: 67},
+//		{NumRemoved: 9},
+//		{NumIdentical: 64}, // incremented by 10
+//	]
+//
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+	var ix, iy int // indexes into sequence x and y
+	for i, ds := range groups {
+		// Handle equal group.
+ if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + numLeadingIdentical++ + } + for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. + if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b12c05cd4f..0fd46d7ffb6 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go index 83031a7f507..668d470fd83 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_value.go +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go index 37080b19b5d..91198f819a7 100644 --- a/vendor/github.com/google/go-querystring/query/encode.go +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -51,8 +51,8 @@ type Encoder interface { // - the field is empty and its tag specifies the "omitempty" option // // The empty values are false, 0, any nil pointer or interface value, any array -// slice, map, or string of length zero, and any time.Time that returns true -// for IsZero(). 
+// slice, map, or string of length zero, and any type (such as time.Time) that +// returns true for IsZero(). // // The URL parameter name defaults to the struct field name but can be // specified in the struct field's tag value. The "url" key in the struct @@ -82,7 +82,14 @@ type Encoder interface { // // time.Time values default to encoding as RFC3339 timestamps. Including the // "unix" option signals that the field should be encoded as a Unix time (see -// time.Unix()) +// time.Unix()). The "unixmilli" and "unixnano" options will encode the number +// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see +// time.UnixNano()). Including the "layout" struct tag (separate from the +// "url" tag) will use the value of the "layout" tag as a layout passed to +// time.Format. For example: +// +// // Encode a time.Time as YYYY-MM-DD +// Field time.Time `layout:"2006-01-02"` // // Slice and Array values default to encoding as multiple URL values of the // same name. Including the "comma" option signals that the field should be @@ -92,7 +99,13 @@ type Encoder interface { // Including the "brackets" option signals that the multiple URL values should // have "[]" appended to the value name. "numbered" will append a number to // the end of each incidence of the value name, example: -// name0=value0&name1=value1, etc. +// name0=value0&name1=value1, etc. Including the "del" struct tag (separate +// from the "url" tag) will use the value of the "del" tag as the delimiter. +// For example: +// +// // Encode a slice of bools as ints ("1" for true, "0" for false), +// // separated by exclamation points "!". +// Field []bool `url:",int" del:"!"` // // Anonymous struct fields are usually encoded as if their inner exported // fields were fields in the outer struct, subject to the standard Go @@ -151,11 +164,15 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { continue } name, opts := parseTag(tag) + if name == "" { - if sf.Anonymous && sv.Kind() == reflect.Struct { - // save embedded struct for later processing - embedded = append(embedded, sv) - continue + if sf.Anonymous { + v := reflect.Indirect(sv) + if v.IsValid() && v.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, v) + continue + } } name = sf.Name @@ -170,7 +187,9 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { } if sv.Type().Implements(encoderType) { - if !reflect.Indirect(sv).IsValid() { + // if sv is a nil pointer and the custom encoder is defined on a non-pointer + // method receiver, set sv to the zero value of the underlying type + if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) { sv = reflect.New(sv.Type().Elem()) } @@ -181,28 +200,38 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { continue } + // recursively dereference pointers. 
break on nil pointers + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { - var del byte + var del string if opts.Contains("comma") { - del = ',' + del = "," } else if opts.Contains("space") { - del = ' ' + del = " " } else if opts.Contains("semicolon") { - del = ';' + del = ";" } else if opts.Contains("brackets") { name = name + "[]" + } else { + del = sf.Tag.Get("del") } - if del != 0 { + if del != "" { s := new(bytes.Buffer) first := true for i := 0; i < sv.Len(); i++ { if first { first = false } else { - s.WriteByte(del) + s.WriteString(del) } - s.WriteString(valueString(sv.Index(i), opts)) + s.WriteString(valueString(sv.Index(i), opts, sf)) } values.Add(name, s.String()) } else { @@ -211,30 +240,25 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { if opts.Contains("numbered") { k = fmt.Sprintf("%s%d", name, i) } - values.Add(k, valueString(sv.Index(i), opts)) + values.Add(k, valueString(sv.Index(i), opts, sf)) } } continue } - for sv.Kind() == reflect.Ptr { - if sv.IsNil() { - break - } - sv = sv.Elem() - } - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts)) + values.Add(name, valueString(sv, opts, sf)) continue } if sv.Kind() == reflect.Struct { - reflectValue(values, sv, name) + if err := reflectValue(values, sv, name); err != nil { + return err + } continue } - values.Add(name, valueString(sv, opts)) + values.Add(name, valueString(sv, opts, sf)) } for _, f := range embedded { @@ -247,7 +271,7 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { } // valueString returns the string representation of a value. -func valueString(v reflect.Value, opts tagOptions) string { +func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { for v.Kind() == reflect.Ptr { if v.IsNil() { return "" @@ -267,6 +291,15 @@ func valueString(v reflect.Value, opts tagOptions) string { if opts.Contains("unix") { return strconv.FormatInt(t.Unix(), 10) } + if opts.Contains("unixmilli") { + return strconv.FormatInt((t.UnixNano() / 1e6), 10) + } + if opts.Contains("unixnano") { + return strconv.FormatInt(t.UnixNano(), 10) + } + if layout := sf.Tag.Get("layout"); layout != "" { + return t.Format(layout) + } return t.Format(time.RFC3339) } @@ -291,8 +324,12 @@ func isEmptyValue(v reflect.Value) bool { return v.IsNil() } - if v.Type() == timeType { - return v.Interface().(time.Time).IsZero() + type zeroable interface { + IsZero() bool + } + + if z, ok := v.Interface().(zeroable); ok { + return z.IsZero() } return false diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml new file mode 100644 index 00000000000..a3286bb2e52 --- /dev/null +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - 1.13.x + - 1.14.x + - 1.15.x + - master + +script: + - go test -race -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md new file mode 100644 index 00000000000..97c1b34fd5e --- /dev/null +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. 
This is not a copyright **assignment**, it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and setup a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Go makes it very simple to ensure properly formatted code, so always run + `go fmt` on your code before committing it. You should also run + [golint][] over your code. As noted in the [golint readme][], it's not + strictly necessary that your code be completely "lint-free", but this will + help you find common style issues. + + 1. Any significant changes should almost always be accompanied by tests. The + project already has good test coverage, so look at some of the existing + tests if you're unsure how to go about it. [gocov][] and [gocov-html][] + are invaluable tools for seeing which parts of your code aren't being + exercised by your tests. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
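Stepping back to the go-querystring changes a few hunks above: the new "unixmilli" and "unixnano" options, the "layout" tag, and the "del" tag all compose with the existing "url" options. A minimal usage sketch against the updated vendored package (the Options struct and its field names are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-querystring/query"
)

// Options exercises the new tag options: "layout" formats a time.Time with a
// custom layout, "unixmilli" encodes milliseconds since the epoch, and "del"
// joins slice elements with a custom delimiter.
type Options struct {
	Day   time.Time `url:"day" layout:"2006-01-02"`
	Stamp time.Time `url:"stamp,unixmilli"`
	Flags []bool    `url:"flags,int" del:"!"`
}

func main() {
	opts := Options{
		Day:   time.Date(2022, 4, 1, 0, 0, 0, 0, time.UTC),
		Stamp: time.Unix(12, 345e6), // 12345 ms since the epoch
		Flags: []bool{true, false, true},
	}
	v, err := query.Values(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Encode()) // day=2022-04-01&flags=1%210%211&stamp=12345
}
```

Note that "layout" and "del" live in their own struct tags, separate from the "url" tag, exactly as the updated doc comment describes.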
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 00000000000..b503aae7d71
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,89 @@
+gofuzz
+======
+
+gofuzz is a library for populating go objects with random values.
+
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz)
+[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/unserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
+You can use it on single variables:
+```go
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```go
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```go
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+	A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```go
+type MyEnum string
+const (
+	A MyEnum = "A"
+	B MyEnum = "B"
+)
+type MyInfo struct {
+	Type  MyEnum
+	AInfo *string
+	BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+	func(e *MyInfo, c fuzz.Continue) {
+		switch c.Intn(2) {
+		case 0:
+			e.Type = A
+			c.Fuzz(&e.AInfo)
+		case 1:
+			e.Type = B
+			c.Fuzz(&e.BInfo)
+		}
+	},
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
+
+See more examples in ```example_test.go```.
+
+You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
+go-fuzz provides the user a byte-slice, which should be converted to different inputs
+for the tested function. This library can help convert the byte slice. Consider for
+example a fuzz test for the function `mypackage.MyFunc` that takes an int argument:
+```go
+// +build gofuzz
+package mypackage
+
+import fuzz "github.com/google/gofuzz"
+
+func Fuzz(data []byte) int {
+	var i int
+	fuzz.NewFromGoFuzz(data).Fuzz(&i)
+	MyFunc(i)
+	return 0
+}
+```
+
+Happy testing!
diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go
new file mode 100644
index 00000000000..5bb36594969
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
+package bytesource
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"math/rand"
+)
+
+// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are
+// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a
+// fallback pseudo random source is created in case more random numbers are required.
+// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
+type ByteSource struct {
+	*bytes.Reader
+	fallback rand.Source
+}
+
+// New returns a new ByteSource from a given slice of bytes.
+func New(input []byte) *ByteSource {
+	s := &ByteSource{
+		Reader:   bytes.NewReader(input),
+		fallback: rand.NewSource(0),
+	}
+	if len(input) > 0 {
+		s.fallback = rand.NewSource(int64(s.consumeUint64()))
+	}
+	return s
+}
+
+func (s *ByteSource) Uint64() uint64 {
+	// Return from input if it was not exhausted.
+	if s.Len() > 0 {
+		return s.consumeUint64()
+	}
+
+	// Input was exhausted, return random number from fallback (in this case fallback should not be
+	// nil). Try first having a Uint64 output (Should work in current rand implementation),
+	// otherwise return a conversion of Int63.
+	if s64, ok := s.fallback.(rand.Source64); ok {
+		return s64.Uint64()
+	}
+	return uint64(s.fallback.Int63())
+}
+
+func (s *ByteSource) Int63() int64 {
+	return int64(s.Uint64() >> 1)
+}
+
+func (s *ByteSource) Seed(seed int64) {
+	s.fallback = rand.NewSource(seed)
+	s.Reader = bytes.NewReader(nil)
+}
+
+// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that the
+// bytes reader is not empty.
+func (s *ByteSource) consumeUint64() uint64 {
+	var bytes [8]byte
+	_, err := s.Read(bytes[:])
+	if err != nil && err != io.EOF {
+		panic("failed reading source") // Should not happen.
+	}
+	return binary.BigEndian.Uint64(bytes[:])
+}
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 00000000000..9f9956d4a64
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating go objects with random values.
+package fuzz
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 00000000000..92c9165c036
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,621 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+	"fmt"
+	"math/rand"
+	"reflect"
+	"regexp"
+	"sync"
+	"time"
+
+	"github.com/google/gofuzz/bytesource"
+	"strings"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+	fuzzFuncs         fuzzFuncMap
+	defaultFuzzFuncs  fuzzFuncMap
+	r                 *rand.Rand
+	nilChance         float64
+	minElements       int
+	maxElements       int
+	maxDepth          int
+	skipFieldPatterns []*regexp.Regexp
+
+	fuzzLock sync.Mutex
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+	return NewWithSeed(time.Now().UnixNano())
+}
+
+func NewWithSeed(seed int64) *Fuzzer {
+	f := &Fuzzer{
+		defaultFuzzFuncs: fuzzFuncMap{
+			reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+		},
+
+		fuzzFuncs:   fuzzFuncMap{},
+		r:           rand.New(rand.NewSource(seed)),
+		nilChance:   .2,
+		minElements: 1,
+		maxElements: 10,
+		maxDepth:    100,
+	}
+	return f
+}
+
+// NewFromGoFuzz is a helper function that enables using gofuzz (this
+// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
+// fuzzing. Essentially, it enables translating the fuzzing bytes from
+// go-fuzz to any Go object using this library.
+//
+// This implementation promises a constant translation from a given slice of
+// bytes to the fuzzed objects. This promise will remain over future
+// versions of Go and of this library.
+//
+// Note: the returned Fuzzer should not be shared between multiple goroutines,
+// as its deterministic output will no longer be available.
+//
+// Example: use go-fuzz to test the function `MyFunc(int)` in the package
+// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
+//
+// // +build gofuzz
+// package mypackage
+// import fuzz "github.com/google/gofuzz"
+// func Fuzz(data []byte) int {
+// 	var i int
+// 	fuzz.NewFromGoFuzz(data).Fuzz(&i)
+// 	MyFunc(i)
+// 	return 0
+// }
+func NewFromGoFuzz(data []byte) *Fuzzer {
+	return New().RandSource(bytesource.New(data))
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Need 2 in and 0 out params!") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type fuzz.Continue") + } + f.fuzzFuncs[argT] = v + } + return f +} + +// RandSource causes f to get values from the given source of randomness. +// Use if you want deterministic fuzzing. +func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Fuzzer) NilChance(p float64) *Fuzzer { + if p < 0 || p > 1 { + panic("p should be between 0 and 1, inclusive.") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { + if atLeast > atMost { + panic("atLeast must be <= atMost") + } + if atLeast < 0 { + panic("atLeast must be >= 0") + } + f.minElements = atLeast + f.maxElements = atMost + return f +} + +func (f *Fuzzer) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Fuzzer) genShouldFill() bool { + return f.r.Float64() >= f.nilChance +} + +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + +// Skip fields which match the supplied pattern. Call this multiple times if needed +// This is useful to skip XXX_ fields generated by protobuf +func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + +// Fuzz recursively fills all of obj's fields with something random. First +// this tries to find a custom fuzz function (see Funcs). If there is no +// custom function this tests whether the object implements fuzz.Interface and, +// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if +// there is a default fuzz function provided by this package. If all of that +// fails, this will generate random values for all primitive fields and then +// recurse for all non-primitives. +// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. +func (f *Fuzzer) Fuzz(obj interface{}) { + f.fuzzLock.Lock() + defer f.fuzzLock.Unlock() + + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, 0) +} + +// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. 
+// Not safe for cyclic or tree-like structs! +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. +func (f *Fuzzer) FuzzNoCustom(obj interface{}) { + f.fuzzLock.Lock() + defer f.fuzzLock.Unlock() + + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, flagNoCustomFuzz) +} + +const ( + // Do not try to find a custom fuzz function. Does not apply recursively. + flagNoCustomFuzz uint64 = 1 << iota +) + +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + return + } + + if flags&flagNoCustomFuzz == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.fuzzer.r) + return + } + + switch v.Kind() { + case reflect.Map: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.fuzzer.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFuzz(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFuzz(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFuzz(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.fuzzer.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + skipField := false + fieldName := v.Type().Field(i).Name + for _, pattern := range fc.fuzzer.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFuzz(v.Field(i), 0) + } + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("Can't handle %#v", v.Interface())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fuzz function for it. + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] + if !ok { + // Second: see if it can fuzz itself. + if v.CanInterface() { + intf := v.Interface() + if fuzzable, ok := intf.(Interface); ok { + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) + return true + } + } + // Finally: see if there is a default fuzz function. 
+ doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.fuzzer.r, + })}) + return true +} + +// Interface represents an object that knows how to fuzz itself. Any time we +// find a type that implements this interface we will delegate the act of +// fuzzing itself. +type Interface interface { + Fuzz(c Continue) +} + +// Continue can be passed to custom fuzzing functions to allow them to use +// the correct source of randomness and to continue fuzzing their members. +type Continue struct { + fc *fuzzerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your fuzzing + // to be repeatable for a given seed. + *rand.Rand +} + +// Fuzz continues fuzzing obj. obj must be a pointer or a reflect.Value of a +// pointer. +func (c Continue) Fuzz(obj interface{}) { + v, ok := obj.(reflect.Value) + if !ok { + v = reflect.ValueOf(obj) + } + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, 0) +} + +// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +func (c Continue) FuzzNoCustom(obj interface{}) { + v, ok := obj.(reflect.Value) + if !ok { + v = reflect.ValueOf(obj) + } + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, flagNoCustomFuzz) +} + +// RandString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func (c Continue) RandString() string { + return randString(c.Rand) +} + +// RandUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) RandUint64() uint64 { + return randUint64(c.Rand) +} + +// RandBool returns true or false randomly. +func (c Continue) RandBool() bool { + return randBool(c.Rand) +} + +func fuzzInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fuzzUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func fuzzTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. 
+	sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+	c.Fuzz(&nsec)
+	*t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+		v.SetBool(randBool(r))
+	},
+	reflect.Int:     fuzzInt,
+	reflect.Int8:    fuzzInt,
+	reflect.Int16:   fuzzInt,
+	reflect.Int32:   fuzzInt,
+	reflect.Int64:   fuzzInt,
+	reflect.Uint:    fuzzUint,
+	reflect.Uint8:   fuzzUint,
+	reflect.Uint16:  fuzzUint,
+	reflect.Uint32:  fuzzUint,
+	reflect.Uint64:  fuzzUint,
+	reflect.Uintptr: fuzzUint,
+	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(float64(r.Float32()))
+	},
+	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(r.Float64())
+	},
+	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+		v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
+	},
+	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+		v.SetComplex(complex(r.Float64(), r.Float64()))
+	},
+	reflect.String: func(v reflect.Value, r *rand.Rand) {
+		v.SetString(randString(r))
+	},
+	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+	return r.Int31()&(1<<30) == 0
+}
+
+type int63nPicker interface {
+	Int63n(int64) int64
+}
+
+// UnicodeRange describes a sequential range of unicode characters.
+// Last must be numerically greater than First.
+type UnicodeRange struct {
+	First, Last rune
+}
+
+// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
+// To be useful, each range must have at least one character (First <= Last) and
+// there must be at least one range.
+type UnicodeRanges []UnicodeRange
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (ur UnicodeRange) choose(r int63nPicker) rune {
+	count := int64(ur.Last - ur.First + 1)
+	return ur.First + rune(r.Int63n(count))
+}
+
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from the range ur. If there are no characters
+// in the range (ur.Last < ur.First), this will panic.
+func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
+	ur.check()
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand)
+	}
+}
+
+// check is a function used to check whether the first rune of ur (UnicodeRange)
+// is greater than the last one, and panics if so.
+func (ur UnicodeRange) check() {
+	if ur.Last < ur.First {
+		panic("The last encoding must be greater than the first one.")
+	}
+}
+
+// randString of UnicodeRange makes a random string up to 20 characters long.
+// Each character is selected from ur (UnicodeRange).
+func (ur UnicodeRange) randString(r *rand.Rand) string {
+	n := r.Intn(20)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur.choose(r))
+	}
+	return sb.String()
+}
+
+// defaultUnicodeRanges sets a default unicode range when the user does not set
+// CustomStringFuzzFunc() but wants to fuzz a string.
+var defaultUnicodeRanges = UnicodeRanges{
+	{' ', '~'},           // ASCII characters
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from one of the ranges of ur (UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges,
+// or a selected range has no characters (.Last < .First), this will panic.
+// Do not modify any of the ranges in ur after calling this function.
+func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
+	// Check unicode ranges slice is empty.
+	if len(ur) == 0 {
+		panic("UnicodeRanges is empty.")
+	}
+	// if not empty, each range should be checked.
+	for i := range ur {
+		ur[i].check()
+	}
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand)
+	}
+}
+
+// randString of UnicodeRanges makes a random string up to 20 characters long.
+// Each character is selected from one of the ranges of ur (UnicodeRanges),
+// and each range has an equal probability of being chosen.
+func (ur UnicodeRanges) randString(r *rand.Rand) string {
+	n := r.Intn(20)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
+	}
+	return sb.String()
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+	return defaultUnicodeRanges.randString(r)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod
new file mode 100644
index 00000000000..8ec4fe9e972
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/go.mod
@@ -0,0 +1,3 @@
+module github.com/google/gofuzz
+
+go 1.12
diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS
new file mode 100644
index 00000000000..fd736cb1cfb
--- /dev/null
+++ b/vendor/github.com/google/pprof/AUTHORS
@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS
new file mode 100644
index 00000000000..8c8c37d2c8f
--- /dev/null
+++ b/vendor/github.com/google/pprof/CONTRIBUTORS
@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name <email address>
+Raul Silvera
+Tipp Moseley
+Hyoun Kyu Cho
+Martin Spier
+Taco de Wolff
+Andrew Hunter
diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/google/pprof/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
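[Editor's note, not part of the vendored sources] The encode.go file added below hand-rolls protobuf encoding and decoding for the profile.proto format. Every string in a profile is interned into a single string table during preEncode, with index 0 reserved for the empty string, and postDecode reverses the mapping. A minimal, runnable sketch of that interning pattern; only addString is taken from the file below, and main is purely illustrative:

    package main

    import "fmt"

    // addString mirrors the helper at the bottom of encode.go: each distinct
    // string gets a stable table index, and "" always occupies index 0.
    func addString(strings map[string]int, s string) int64 {
        i, ok := strings[s]
        if !ok {
            i = len(strings)
            strings[s] = i
        }
        return int64(i)
    }

    func main() {
        table := map[string]int{"": 0}
        fmt.Println(addString(table, "cpu"))         // 1: new entry
        fmt.Println(addString(table, "nanoseconds")) // 2: new entry
        fmt.Println(addString(table, "cpu"))         // 1: interned, reused
    }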
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 00000000000..1e84c72d43d --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,567 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := 
range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + var tmp []Line + x.Line = append(tmp, x.Line...) // Shrink to allocated size + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. 
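+//
+// Editor's note, not upstream documentation: IDs below len(items)+1 are
+// resolved through dense slices (the mappingIds, functionIds, and
+// locationIds slices in the body), and through a map otherwise, so
+// well-formed profiles with small consecutive IDs avoid map lookups.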
+func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + for _, s := range p.Sample { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + s.Location = make([]*Location, len(s.locationIDX)) + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr 
with enough empty strings to make arr
+// length l when arr's length is less than l.
+func padStringArray(arr []string, l int) []string {
+	if l <= len(arr) {
+		return arr
+	}
+	return append(arr, make([]string, l-len(arr))...)
+}
+
+func (p *ValueType) decoder() []decoder {
+	return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.typeX)
+	encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+	nil, // 0
+	// optional int64 type = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+	// optional int64 unit = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+	return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+	encodeUint64s(b, 1, p.locationIDX)
+	encodeInt64s(b, 2, p.Value)
+	for _, x := range p.labelX {
+		encodeMessage(b, 3, x)
+	}
+}
+
+var sampleDecoder = []decoder{
+	nil, // 0
+	// repeated uint64 location = 1
+	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+	// repeated int64 value = 2
+	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+	// repeated Label label = 3
+	func(b *buffer, m message) error {
+		s := m.(*Sample)
+		n := len(s.labelX)
+		s.labelX = append(s.labelX, label{})
+		return decodeMessage(b, &s.labelX[n])
+	},
+}
+
+func (p label) decoder() []decoder {
+	return labelDecoder
+}
+
+func (p label) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.keyX)
+	encodeInt64Opt(b, 2, p.strX)
+	encodeInt64Opt(b, 3, p.numX)
+	encodeInt64Opt(b, 4, p.unitX)
+}
+
+var labelDecoder = []decoder{
+	nil, // 0
+	// optional int64 key = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+	// optional int64 str = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+	// optional int64 num = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+	// optional int64 num_unit = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+	return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.Start)
+	encodeUint64Opt(b, 3, p.Limit)
+	encodeUint64Opt(b, 4, p.Offset)
+	encodeInt64Opt(b, 5, p.fileX)
+	encodeInt64Opt(b, 6, p.buildIDX)
+	encodeBoolOpt(b, 7, p.HasFunctions)
+	encodeBoolOpt(b, 8, p.HasFilenames)
+	encodeBoolOpt(b, 9, p.HasLineNumbers)
+	encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_start = 2
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    //
optional bool has_filenames = 8
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+	return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.mappingIDX)
+	encodeUint64Opt(b, 3, p.Address)
+	for i := range p.Line {
+		encodeMessage(b, 4, &p.Line[i])
+	}
+	encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+var locationDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
+	func(b *buffer, m message) error { // repeated Line line = 4
+		pp := m.(*Location)
+		n := len(pp.Line)
+		pp.Line = append(pp.Line, Line{})
+		return decodeMessage(b, &pp.Line[n])
+	},
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+func (p *Line) decoder() []decoder {
+	return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.functionIDX)
+	encodeInt64Opt(b, 2, p.Line)
+}
+
+var lineDecoder = []decoder{
+	nil, // 0
+	// optional uint64 function_id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+	// optional int64 line = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+func (p *Function) decoder() []decoder {
+	return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeInt64Opt(b, 2, p.nameX)
+	encodeInt64Opt(b, 3, p.systemNameX)
+	encodeInt64Opt(b, 4, p.filenameX)
+	encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+	nil, // 0
+	// optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+	// optional int64 function_name = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+	// optional int64 function_system_name = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+	// optional int64 filename = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+	// optional int64 start_line = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+	i, ok := strings[s]
+	if !ok {
+		i = len(strings)
+		strings[s] = i
+	}
+	return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	s := int(*strng)
+	if s < 0 || s >= len(strings) {
+		return "", errMalformed
+	}
+	*strng = 0
+	return strings[s], nil
+}
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 00000000000..ea8e66c68d2
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,270 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	focusOrIgnore := make(map[uint64]bool)
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			if showFromLocs[sample.Location[i].ID] {
+				sample.Location = sample.Location[:i+1]
+				s = append(s, sample)
+				break
+			}
+		}
+	}
+	p.Sample = s
+	return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. 
+ return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 00000000000..bef1d60467c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 00000000000..91f45e53c6c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
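+				// (Editor's note: attribute lines look like "format=java"
+				// or "sampling period=100"; the first non-empty line that
+				// does not match attributeRx ends the header section.)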
+				return b, nil
+			}
+
+			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+			var err error
+			switch pType + "/" + attribute {
+			case "heap/format", "cpu/format", "contention/format":
+				if value != "java" {
+					return nil, errUnrecognized
+				}
+			case "heap/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "inuse_objects", Unit: "count"},
+					{Type: "inuse_space", Unit: value},
+				}
+			case "contention/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "contentions", Unit: "count"},
+					{Type: "delay", Unit: value},
+				}
+			case "contention/sampling period":
+				p.PeriodType = &ValueType{
+					Type: "contentions", Unit: "count",
+				}
+				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+			case "contention/ms since reset":
+				millis, err := strconv.ParseInt(value, 0, 64)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+				p.DurationNanos = millis * 1000 * 1000
+			default:
+				return nil, errUnrecognized
+			}
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	locs := make(map[uint64]*Location)
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			sample := javaSampleRx.FindStringSubmatch(line)
+			if sample == nil {
+				// Not a valid sample, exit.
+				return b, locs, nil
+			}
+
+			// Java profiles have data/fields inverted compared to other
+			// profile types.
+			var err error
+			value1, value2, value3 := sample[2], sample[1], sample[3]
+			addrs, err := parseHexAddresses(value3)
+			if err != nil {
+				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+			}
+
+			var sloc []*Location
+			for _, addr := range addrs {
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			s := &Sample{
+				Value:    make([]int64, 2),
+				Location: sloc,
+			}
+
+			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+
+			switch pType {
+			case "heap":
+				const javaHeapzSamplingRate = 524288 // 512K
+				if s.Value[0] == 0 {
+					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+				}
+				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+			case "contention":
+				if period := p.Period; period != 0 {
+					s.Value[0] = s.Value[0] * p.Period
+					s.Value[1] = s.Value[1] * p.Period
+				}
+			}
+			p.Sample = append(p.Sample, s)
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the IDs of the locations.
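+//
+// Editor's note, not upstream documentation: location lines matched by
+// javaLocationRx look roughly like
+//
+//	0x00000004 com.example.Foo.bar (Foo.java:123)
+//	0x00000008 handleRequest (/usr/lib/libexample.so)
+//
+// where the symbol and file names are hypothetical, for illustration only.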
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". + lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 00000000000..0c8f3bb5b71 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1225 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
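+//
+// Editor's note, not upstream documentation: a Go count profile accepted
+// by parseGoCount below looks roughly like
+//
+//	goroutine profile: total 2
+//	1 @ 0x401234 0x402345
+//	1 @ 0x401234
+//
+// with the addresses here being illustrative placeholders.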
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... + logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+			addr--
+			loc := locations[addr]
+			if loc == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				locations[addr] = loc
+				p.Location = append(p.Location, loc)
+			}
+			locs = append(locs, loc)
+		}
+		p.Sample = append(p.Sample, &Sample{
+			Location: locs,
+			Value:    []int64{n},
+		})
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+	seen := make(map[*Location]bool, len(p.Location))
+	var locs []*Location
+
+	for _, s := range p.Sample {
+		for _, l := range s.Location {
+			if seen[l] {
+				continue
+			}
+			l.ID = uint64(len(locs) + 1)
+			locs = append(locs, l)
+			seen[l] = true
+		}
+	}
+	p.Location = locs
+}
+
+func (p *Profile) remapFunctionIDs() {
+	seen := make(map[*Function]bool, len(p.Function))
+	var fns []*Function
+
+	for _, l := range p.Location {
+		for _, ln := range l.Line {
+			fn := ln.Function
+			if fn == nil || seen[fn] {
+				continue
+			}
+			fn.ID = uint64(len(fns) + 1)
+			fns = append(fns, fn)
+			seen[fn] = true
+		}
+	}
+	p.Function = fns
}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M); if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+	// Some profile handlers will incorrectly set regions for the main
+	// executable if its section is remapped. Fix them through heuristics.
+
+	if len(p.Mapping) > 0 {
+		// Remove the initial mapping if named '/anon_hugepage' and has a
+		// consecutive adjacent mapping.
+		if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+			if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+				p.Mapping = p.Mapping[1:]
+			}
+		}
+	}
+
+	// Subtract the offset from the start of the main mapping if it
+	// ends up at a recognizable start address.
+	if len(p.Mapping) > 0 {
+		const expectedStart = 0x400000
+		if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+			m.Start = expectedStart
+			m.Offset = 0
+		}
+	}
+
+	// Associate each location that has an address with the corresponding
+	// mapping. Create a fake mapping if a suitable one isn't found.
+	var fake *Mapping
+nextLocation:
+	for _, l := range p.Location {
+		a := l.Address
+		if l.Mapping != nil || a == 0 {
+			continue
+		}
+		for _, m := range p.Mapping {
+			if m.Start <= a && a < m.Limit {
+				l.Mapping = m
+				continue nextLocation
+			}
+		}
+		// Work around legacy handlers failing to encode the first
+		// part of mappings split into adjacent ranges.
+		for _, m := range p.Mapping {
+			if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start {
+				m.Start -= m.Offset
+				m.Offset = 0
+				l.Mapping = m
+				continue nextLocation
+			}
+		}
+		// If there is still no mapping, create a fake one.
+		// This is important for the Go legacy handler, which produced
+		// no mappings.
+		if fake == nil {
+			fake = &Mapping{
+				ID:    1,
+				Limit: ^uint64(0),
+			}
+			p.Mapping = append(p.Mapping, fake)
+		}
+		l.Mapping = fake
+	}
+
+	// Reset all mapping IDs.
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
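+	// (Editor's note: with 6400 samples, margin below is 200, so a frame
+	// that is second-to-bottom in at least 6200 samples gets stripped.)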
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// The last stack trace is of the form: +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). +func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. 
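+//
+// For example, a heapz v2 header line (hypothetical values) looks like:
+//
+//	heap profile: 1: 262144 [4: 376832] @ heap_v2/524288
+//
+// where the first pair is in-use objects/bytes, the bracketed pair is
+// allocated objects/bytes, and the trailing number is the sampling
+// period.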
+func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. 
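+	// It writes the named return values value and blocksize directly,
+	// and reports malformed fields together with the offending line.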
+ addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). +func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
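+//
+// For example, a Go runtime mutex profile (hypothetical values) starts:
+//
+//	--- mutex:
+//	cycles/second=2000000000
+//	sampling period=1
+//
+// followed by sample rows of the form
+// "<delay-cycles> <contentions> @ <hex addresses>".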
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. + return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
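+	// E.g. (hypothetical values): with period = 10 and cpuHz = 2e9,
+	// cpuGHz is 2.0, so a delay of 1,000 cycles becomes
+	// 1000*10/2.0 = 5,000 nanoseconds, and a contention count of 3
+	// becomes 30.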
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + line = "" + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. 
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+	for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+	}
+	if err := s.Err(); err != nil {
+		return err
+	}
+	return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// To attach the parsed mappings to a profile and associate locations
+// with the corresponding mapping based on their address, use
+// ParseMemoryMap instead.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+	s := bufio.NewScanner(rd)
+	return parseProcMapsFromScanner(s)
+}
+
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+	var mapping []*Mapping
+
+	var attrs []string
+	const delimiter = "="
+	r := strings.NewReplacer()
+	for s.Scan() {
+		line := r.Replace(removeLoggingInfo(s.Text()))
+		m, err := parseMappingEntry(line)
+		if err != nil {
+			if err == errUnrecognized {
+				// Recognize assignments of the form: attr=value, and replace
+				// $attr with value on subsequent mappings.
+				if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+					r = strings.NewReplacer(attrs...)
+				}
+				// Ignore any unrecognized entries.
+				continue
+			}
+			return nil, err
+		}
+		if m == nil {
+			continue
+		}
+		mapping = append(mapping, m)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+	if match := logInfoRE.FindStringIndex(line); match != nil {
+		return line[match[1]:]
+	}
+	return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+	return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or one of a variety of legacy formats, and overrides
+// the mappings in the current profile. It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+	mapping, err := parseProcMapsFromScanner(s)
+	if err != nil {
+		return err
+	}
+	p.Mapping = append(p.Mapping, mapping...)
+	p.massageMappings()
+	p.remapLocationIDs()
+	p.remapFunctionIDs()
+	p.remapMappingIDs()
+	return nil
+}
+
+func parseMappingEntry(l string) (*Mapping, error) {
+	var start, end, perm, file, offset, buildID string
+	if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+		start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+	} else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+		start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+	} else {
+		return nil, errUnrecognized
+	}
+
+	var err error
+	mapping := &Mapping{
+		File:    file,
+		BuildID: buildID,
+	}
+	if perm != "" && !strings.Contains(perm, "x") {
+		// Skip non-executable entries.
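+		// A (nil, nil) return tells the caller to silently drop this
+		// entry rather than treat it as a parse error.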
+ return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. 
+ `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 00000000000..5ab6e9b9b08 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,482 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
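+//
+// A minimal usage sketch (assuming p1 and p2 are compatible profiles):
+//
+//	merged, err := Merge([]*Profile{p1, p2})
+//	if err != nil {
+//		// sample or period types did not match
+//	}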
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID map[uint64]*Location + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +// key generates sampleKey to be used as a key for maps. +func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) + } + + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) + } + sort.Strings(labels) + + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + } + sort.Strings(numlabels) + + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), + } +} + +type sampleKey struct { + locations string + labels string + numlabels string +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l, ok := pm.locationsByID[src.ID]; ok { + pm.locationsByID[src.ID] = l + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID[src.ID] = ll + return ll + } + pm.locationsByID[src.ID] = l + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. 
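+//
+// For example, a mapping spanning 0x1ffe bytes is keyed as if it were
+// 0x2000 bytes long, so two instances of the same binary mapped with
+// slightly different sizes still produce equal keys.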
+func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. 
+func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 00000000000..2590c8ddb42 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,805 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+ encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = ioutil.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
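+//
+// A minimal decoding sketch (assuming buf holds raw, uncompressed
+// profile.proto bytes; gzipped input should go through ParseData):
+//
+//	p, err := ParseUncompressed(buf)
+//	if err != nil {
+//		// buf was empty or not a valid profile.proto message
+//	}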
+func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. +func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. 
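+//
+// For example (hypothetical samples): if "request" is first seen with
+// unit "bytes" and later with "kilobytes", the result maps "request"
+// to "bytes" and lists "kilobytes" as ignored; a numeric key seen with
+// no unit at all falls back to the inference rules above.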
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. +func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" 
+ if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. +func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
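+//
+// That is, value i of every sample is multiplied by ratios[i], and
+// samples whose values all become zero are dropped. A short sketch
+// (hypothetical ratios; one entry per sample type):
+//
+//	// halve the first sample type, leave the second unchanged
+//	if err := p.ScaleN([]float64{0.5, 1}); err != nil {
+//		// len(ratios) did not match len(p.SampleType)
+//	}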
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 00000000000..539ad3ab33f --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,370 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
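+//
+// As a concrete wire-format example, the varint encoding of 300 is the
+// two bytes 0xAC 0x02: the value is emitted in 7-bit little-endian
+// groups, with the high bit set on every byte except the last.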
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + tmp := make([]int64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, int64(u)) + } + *x = append(*x, tmp...) 
+ return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + tmp := make([]uint64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, u) + } + *x = append(*x, tmp...) + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 00000000000..02d21a81846 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,178 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. 
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + break + } + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index 9d92c11f16f..f765a46f915 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID). Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b1746163151..b404f4bec27 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -26,8 +26,8 @@ var ( // NewMD5 and NewSHA1. func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() - h.Write(space[:]) - h.Write(data) + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go index 7f9e0c6c0e3..14bd34072b6 100644 --- a/vendor/github.com/google/uuid/marshal.go +++ b/vendor/github.com/google/uuid/marshal.go @@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) { // UnmarshalText implements encoding.TextUnmarshaler. func (uuid *UUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) - if err == nil { - *uuid = id + if err != nil { + return err } - return err + *uuid = id + return nil } // MarshalBinary implements encoding.BinaryMarshaler. diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go index f326b54db37..2e02ec06c01 100644 --- a/vendor/github.com/google/uuid/sql.go +++ b/vendor/github.com/google/uuid/sql.go @@ -9,7 +9,7 @@ import ( "fmt" ) -// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 524404cc522..60d26bb50c6 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -35,6 +35,12 @@ const ( var rander = rand.Reader // random function +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + // Parse decodes s into a UUID or returns an error. 
Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -68,7 +74,7 @@ func Parse(s string) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -112,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go index 199a1ac6540..463109629ee 100644 --- a/vendor/github.com/google/uuid/version1.go +++ b/vendor/github.com/google/uuid/version1.go @@ -17,12 +17,6 @@ import ( // // In most cases, New should be used. func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - var uuid UUID now, seq, err := GetTime() if err != nil { @@ -38,7 +32,13 @@ func NewUUID() (UUID, error) { binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() return uuid, nil } diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 84af91c9f54..86160fbd072 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -14,6 +14,14 @@ func New() UUID { return Must(NewRandom()) } +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + // NewRandom returns a Random (Version 4) UUID. // // The strength of the UUIDs is based on the strength of the crypto/rand @@ -27,8 +35,13 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } diff --git a/vendor/github.com/google/wire/.codecov.yml b/vendor/github.com/google/wire/.codecov.yml new file mode 100644 index 00000000000..5ae6b8355c9 --- /dev/null +++ b/vendor/github.com/google/wire/.codecov.yml @@ -0,0 +1,13 @@ +comment: off +coverage: + status: + project: + default: + target: 0 + threshold: null + base: auto + patch: + default: + target: 0 + threshold: null + base: auto diff --git a/vendor/github.com/google/wire/.contributebot b/vendor/github.com/google/wire/.contributebot new file mode 100644 index 00000000000..9a66b3babd6 --- /dev/null +++ b/vendor/github.com/google/wire/.contributebot @@ -0,0 +1,4 @@ +{ + "issue_title_pattern": "^.*$", + "pull_request_title_response": "Please edit the title of this pull request with the name of the affected component, or \"all\", followed by a colon, followed by a short summary of the change." 
+} diff --git a/vendor/github.com/google/wire/.travis.yml b/vendor/github.com/google/wire/.travis.yml new file mode 100644 index 00000000000..680a5003acd --- /dev/null +++ b/vendor/github.com/google/wire/.travis.yml @@ -0,0 +1,53 @@ +# Copyright 2018 The Wire Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +language: go +go_import_path: github.com/google/wire + +before_install: + # The Bash that comes with OS X is ancient. + # grep is similar: it's not GNU grep, which means commands aren't portable. + # Homebrew installs grep as ggrep if you don't build from source, so it needs + # moving so it takes precedence in the PATH. + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + HOMEBREW_NO_AUTO_UPDATE=1 brew install bash grep; + mv $(brew --prefix)/bin/ggrep $(brew --prefix)/bin/grep; + fi + +install: + # Re-checkout files preserving line feeds. This prevents Windows builds from + # converting \n to \r\n. + - "git config --global core.autocrlf input" + - "git checkout -- ." + +script: + - 'internal/runtests.sh' + +env: + global: + - GO111MODULE=on + - GOPROXY=https://proxy.golang.org + +# When updating Go versions: +# In addition to changing the "go:" versions below, edit the version +# test in internal/runtests.sh. + +jobs: + include: + - go: "1.13.x" + os: linux + - go: "1.13.x" + os: osx + - go: "1.13.x" + os: windows diff --git a/vendor/github.com/google/wire/AUTHORS b/vendor/github.com/google/wire/AUTHORS new file mode 100644 index 00000000000..4d8d4b3197f --- /dev/null +++ b/vendor/github.com/google/wire/AUTHORS @@ -0,0 +1,18 @@ +# This is the official list of Wire authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name +# See CONTRIBUTORS for the meaning of multiple email addresses. + +# Please keep the list sorted. + +Google LLC +ktr +Kumbirai Tanekha +Oleg Kovalov +Yoichiro Shimizu +Zachary Romero diff --git a/vendor/github.com/google/wire/CODE_OF_CONDUCT.md b/vendor/github.com/google/wire/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..3a8545eccf6 --- /dev/null +++ b/vendor/github.com/google/wire/CODE_OF_CONDUCT.md @@ -0,0 +1,10 @@ +# Code of Conduct + +This project is covered under the [Go Code of Conduct][]. In summary: + +- Treat everyone with respect and kindness. +- Be thoughtful in how you communicate. +- Don’t be destructive or inflammatory. +- If you encounter an issue, please mail conduct@golang.org. + +[Go Code of Conduct]: https://golang.org/conduct diff --git a/vendor/github.com/google/wire/CONTRIBUTING.md b/vendor/github.com/google/wire/CONTRIBUTING.md new file mode 100644 index 00000000000..68445fc463d --- /dev/null +++ b/vendor/github.com/google/wire/CONTRIBUTING.md @@ -0,0 +1,152 @@ +# How to Contribute + +We would love to accept your patches and contributions to this project. Here is +how you can help. 
+ +## Filing issues + +Filing issues is an important way you can contribute to the Wire Project. We +want your feedback on things like bugs, desired API changes, or just anything +that isn't working for you. + +### Bugs + +If your issue is a bug, open one +[here](https://github.com/google/wire/issues/new). The easiest way to file an +issue with all the right information is to run `go bug`. `go bug` will print out +a handy template of questions and system information that will help us get to +the root of the issue quicker. + +### Changes + +Unlike the core Go project, we do not have a formal proposal process for +changes. If you have a change you would like to see in Wire, please file an +issue with the necessary details. + +### Triaging + +The Go Cloud team triages issues at least every two weeks, but usually within +two business days. Bugs or feature requests are placed into a **Sprint** +milestone, which means the issue is intended to be worked on. Issues that we +would like to address but do not have time for are placed into the [Unplanned][] +milestone. + +[Unplanned]: https://github.com/google/wire/milestone/1 + +## Contributing Code + +We love accepting contributions! If your change is minor, please feel free to +submit a [pull request](https://help.github.com/articles/about-pull-requests/). +If your change is larger, or adds a feature, please file an issue beforehand so +that we can discuss the change. You're welcome to file an implementation pull +request immediately as well, although we generally lean towards discussing the +change and then reviewing the implementation separately. + +### Finding something to work on + +If you want to write some code, but don't know where to start or what you might +want to do, take a look at our [Unplanned][] milestone. This is where you can +find issues we would like to address but can't currently find time for. See if +any of the latest ones look interesting! If you need help before you can start +work, you can comment on the issue and we will try to help as best we can. + +### Contributor License Agreement + +Contributions to this project can only be made by those who have signed Google's +Contributor License Agreement. You (or your employer) retain the copyright to +your contribution; this simply gives us permission to use and redistribute your +contributions as part of the project. Head over to +<https://cla.developers.google.com/> to see your current agreements on file or +to sign a new one. + +As a personal contributor, you only need to sign the Google CLA once across all +Google projects. If you've already signed the CLA, there is no need to do it +again. If you are submitting code on behalf of your employer, there's +[a separate corporate CLA that your employer manages for you](https://opensource.google.com/docs/cla/#external-contributors). + +## Making a pull request + +* Follow the normal + [pull request flow](https://help.github.com/articles/creating-a-pull-request/) +* Build your changes using Go 1.11 with Go modules enabled. Wire's continuous + integration uses Go modules in order to ensure + [reproducible builds](https://research.swtch.com/vgo-repro). +* Test your changes using `go test ./...`. Please add tests that show the + change does what it says it does, even if there wasn't a test in the first + place. +* Feel free to make as many commits as you want; we will squash them all into + a single commit before merging your change. +* Check the diffs, write a useful description (including something like + `Fixes #123` if it's fixing a bug) and send the PR out.
+* [Travis CI](http://travis-ci.com) will run tests against the PR. This should + happen within 10 minutes or so. If a test fails, go back to the coding stage + and try to fix the test and push the same branch again. You won't need to + make a new pull request, the changes will be rolled directly into the PR you + already opened. Wait for Travis again. There is no need to assign a reviewer + to the PR, the project team will assign someone for review during the + standard [triage](#triaging) process. + +## Code review + +All submissions, including submissions by project members, require review. It is +almost never the case that a pull request is accepted without some changes +requested, so please do not be offended! + +When you have finished making requested changes to your pull request, please +make a comment containing "PTAL" (Please Take Another Look) on your pull +request. GitHub notifications can be noisy, and it is unfortunately easy for +things to be lost in the shuffle. + +Once your PR is approved (hooray!) the reviewer will squash your commits into a +single commit, and then merge the commit onto the Wire master branch. Thank you! + +## Github code review workflow conventions + +(For project members and frequent contributors.) + +As a contributor: + +- Try hard to make each Pull Request as small and focused as possible. In + particular, this means that if a reviewer asks you to do something that is + beyond the scope of the Pull Request, the best practice is to file another + issue and reference it from the Pull Request rather than just adding more + commits to the existing PR. +- Adding someone as a Reviewer means "please feel free to look and comment"; + the review is optional. Choose as many Reviewers as you'd like. +- Adding someone as an Assignee means that the Pull Request should not be + submitted until they approve. If you choose multiple Assignees, wait until + all of them approve. It is fine to ask someone if they are OK with being + removed as an Assignee. + - Note that if you don't select any assignees, ContributeBot will turn all + of your Reviewers into Assignees. +- Make as many commits as you want locally, but try not to push them to Github + until you've addressed comments; this allows the email notification about + the push to be a signal to reviewers that the PR is ready to be looked at + again. +- When there may be confusion about what should happen next for a PR, be + explicit; add a "PTAL" comment if it is ready for review again, or a "Please + hold off on reviewing for now" if you are still working on addressing + comments. +- "Resolve" comments that you are sure you've addressed; let your reviewers + resolve ones that you're not sure about. +- Do not use `git push --force`; this can cause comments from your reviewers + that are associated with a specific commit to be lost. This implies that + once you've sent a Pull Request, you should use `git merge` instead of `git + rebase` to incorporate commits from the master branch. + +As a reviewer: + +- Be timely in your review process, especially if you are an Assignee. +- Try to use `Start a Review` instead of single comments, to reduce email + spam. +- "Resolve" your own comments if they have been addressed. +- If you want your review to be blocking, and are not currently an Assignee, + add yourself as an Assignee. + +When squashing-and-merging: + +- Ensure that **all** of the Assignees have approved. +- Do a final review of the one-line PR summary, ensuring that it accurately + describes the change. 
+- Delete the automatically added commit lines; these are generally not + interesting and make commit history harder to read. diff --git a/vendor/github.com/google/wire/CONTRIBUTORS b/vendor/github.com/google/wire/CONTRIBUTORS new file mode 100644 index 00000000000..00a94f89caa --- /dev/null +++ b/vendor/github.com/google/wire/CONTRIBUTORS @@ -0,0 +1,43 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Wire repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Individual's name +# Individual's name +# +# An entry with multiple email addresses specifies that the +# first address should be used in the submit logs and +# that the other addresses should be recognized as the +# same person when interacting with Git. + +# Please keep the list sorted. + +Chris Lewis +Christina Austin <4240737+clausti@users.noreply.github.com> +Eno Compton +Issac Trotts +ktr +Kumbirai Tanekha +Oleg Kovalov +Robert van Gent +Ross Light +Tuo Shan +Yoichiro Shimizu +Zachary Romero diff --git a/vendor/github.com/google/wire/LICENSE b/vendor/github.com/google/wire/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/google/wire/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/wire/README.md b/vendor/github.com/google/wire/README.md new file mode 100644 index 00000000000..d432b63374f --- /dev/null +++ b/vendor/github.com/google/wire/README.md @@ -0,0 +1,60 @@ +# Wire: Automated Initialization in Go + +[![Build Status](https://travis-ci.com/google/wire.svg?branch=master)][travis] +[![godoc](https://godoc.org/github.com/google/wire?status.svg)][godoc] +[![Coverage](https://codecov.io/gh/google/wire/branch/master/graph/badge.svg)](https://codecov.io/gh/google/wire) + + +Wire is a code generation tool that automates connecting components using +[dependency injection][]. Dependencies between components are represented in +Wire as function parameters, encouraging explicit initialization instead of +global variables. Because Wire operates without runtime state or reflection, +code written to be used with Wire is useful even for hand-written +initialization. + +For an overview, see the [introductory blog post][]. 
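To make the dependency-injection model described above concrete, here is a hedged, minimal sketch of a provider/injector pair in Wire's style. The `Message` and `Greeter` names are invented for illustration; in a real project the injector file would also carry a `wireinject` build constraint so it is excluded from normal builds.

```go
package main

import "github.com/google/wire"

type Message string

type Greeter struct{ Msg Message }

// Providers: dependencies are expressed as ordinary function parameters.
func NewMessage() Message { return "Hello, Wire!" }

func NewGreeter(m Message) Greeter { return Greeter{Msg: m} }

// Injector template: contains only a call to wire.Build. The returned
// zero value is never used; the wire tool generates the real body.
func InitGreeter() Greeter {
	wire.Build(NewMessage, NewGreeter)
	return Greeter{}
}
```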
+ +[dependency injection]: https://en.wikipedia.org/wiki/Dependency_injection +[introductory blog post]: https://blog.golang.org/wire +[godoc]: https://godoc.org/github.com/google/wire +[travis]: https://travis-ci.com/google/wire + +## Installing + +Install Wire by running: + +```shell +go get github.com/google/wire/cmd/wire +``` + +and ensuring that `$GOPATH/bin` is added to your `$PATH`. + +## Documentation + +- [Tutorial][] +- [User Guide][] +- [Best Practices][] +- [FAQ][] + +[Tutorial]: ./_tutorial/README.md +[Best Practices]: ./docs/best-practices.md +[FAQ]: ./docs/faq.md +[User Guide]: ./docs/guide.md + +## Project status + +As of version v0.3.0, Wire is *beta* and is considered feature complete. It +works well for the tasks it was designed to perform, and we prefer to keep it +as simple as possible. + +We'll not be accepting new features at this time, but will gladly accept bug +reports and fixes. + +## Community + +You can contact us on the [go-cloud mailing list][]. + +This project is covered by the Go [Code of Conduct][]. + +[Code of Conduct]: ./CODE_OF_CONDUCT.md +[go-cloud mailing list]: https://groups.google.com/forum/#!forum/go-cloud diff --git a/vendor/github.com/google/wire/go.mod b/vendor/github.com/google/wire/go.mod new file mode 100644 index 00000000000..b2233dc52b4 --- /dev/null +++ b/vendor/github.com/google/wire/go.mod @@ -0,0 +1,10 @@ +module github.com/google/wire + +go 1.12 + +require ( + github.com/google/go-cmp v0.2.0 + github.com/google/subcommands v1.0.1 + github.com/pmezard/go-difflib v1.0.0 + golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b +) diff --git a/vendor/github.com/google/wire/go.sum b/vendor/github.com/google/wire/go.sum new file mode 100644 index 00000000000..88ea58c5284 --- /dev/null +++ b/vendor/github.com/google/wire/go.sum @@ -0,0 +1,12 @@ +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/subcommands v1.0.1 h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b h1:NVD8gBK33xpdqCaZVVtd6OFJp+3dxkXuz7+U7KaVN6s= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/google/wire/wire.go b/vendor/github.com/google/wire/wire.go new file mode 100644 index 00000000000..fe8edc8c8ac --- /dev/null +++ b/vendor/github.com/google/wire/wire.go @@ -0,0 +1,196 @@ +// Copyright 2018 The Wire Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package wire contains directives for Wire code generation. +// For an overview of working with Wire, see the user guide at +// https://github.com/google/wire/blob/master/docs/guide.md +// +// The directives in this package are used as input to the Wire code generation +// tool. The entry points of Wire's analysis are injector functions: function +// templates denoted by only containing a call to Build. The arguments to Build +// describe a set of providers, and the Wire code generation tool builds a +// directed acyclic graph of the providers' output types. The generated code will +// fill in the function template by using the providers from the provider set to +// instantiate any needed types. +package wire + +// ProviderSet is a marker type that collects a group of providers. +type ProviderSet struct{} + +// NewSet creates a new provider set that includes the providers in its +// arguments. Each argument is a function value, a provider set, a call to +// Struct, a call to Bind, a call to Value, a call to InterfaceValue or a call +// to FieldsOf. +// +// Passing a function value to NewSet declares that the function's first +// return value type will be provided by calling the function. The arguments +// to the function will come from the providers for their types. As such, all +// the function's parameters must be of non-identical types. The function may +// optionally return an error as its last return value and a cleanup function +// as the second return value. A cleanup function must be of type func() and is +// guaranteed to be called before the cleanup function of any of the +// provider's inputs. If any provider returns an error, the injector function +// will call all the appropriate cleanup functions and return the error from +// the injector function. +// +// Passing a ProviderSet to NewSet is the same as if the set's contents +// were passed as arguments to NewSet directly. +// +// The behavior of passing the result of a call to other functions in this +// package is described in their respective doc comments. +// +// For compatibility with older versions of Wire, passing a struct value of type +// S to NewSet declares that both S and *S will be provided by creating a new +// value of the appropriate type by filling in each field of S using the +// provider of the field's type. This form is deprecated and will be removed in +// a future version of Wire: new provider sets should use wire.Struct. +func NewSet(...interface{}) ProviderSet { + return ProviderSet{} +} + +// Build is placed in the body of an injector function template to declare the +// providers to use. The Wire code generation tool will fill in an +// implementation of the function. The arguments to Build are interpreted the +// same as NewSet: they determine the provider set presented to Wire's +// dependency graph. Build returns an error message that can be sent to a call +// to panic(). +// +// The parameters of the injector function are used as inputs in the dependency +// graph.
+// +// Similar to provider functions passed into NewSet, the first return value is +// the output of the injector function, the optional second return value is a +// cleanup function, and the optional last return value is an error. If any of +// the provider functions in the injector function's provider set return errors +// or cleanup functions, the corresponding return value must be present in the +// injector function template. +// +// Examples: +// +// func injector(ctx context.Context) (*sql.DB, error) { +// wire.Build(otherpkg.FooSet, myProviderFunc) +// return nil, nil +// } +// +// func injector(ctx context.Context) (*sql.DB, error) { +// panic(wire.Build(otherpkg.FooSet, myProviderFunc)) +// } +func Build(...interface{}) string { + return "implementation not generated, run wire" +} + +// A Binding maps an interface to a concrete type. +type Binding struct{} + +// Bind declares that a concrete type should be used to satisfy a dependency on +// the type of iface. iface must be a pointer to an interface type; to must be a +// pointer to a concrete type. +// +// Example: +// +// type Fooer interface { +// Foo() +// } +// +// type MyFoo struct{} +// +// func (MyFoo) Foo() {} +// +// var MySet = wire.NewSet( +// wire.Struct(new(MyFoo)), +// wire.Bind(new(Fooer), new(MyFoo))) +func Bind(iface, to interface{}) Binding { + return Binding{} +} + +// bindToUsePointer is detected by the wire tool to indicate that Bind's second argument should take a pointer. +// See https://github.com/google/wire/issues/120 for details. +const bindToUsePointer = true + +// A ProvidedValue is an expression that is copied to the generated injector. +type ProvidedValue struct{} + +// Value binds an expression to provide the type of the expression. +// The expression may not be an interface value; use InterfaceValue for that. +// +// Example: +// +// var MySet = wire.NewSet(wire.Value([]string(nil))) +func Value(interface{}) ProvidedValue { + return ProvidedValue{} +} + +// InterfaceValue binds an expression to provide a specific interface type. +// The first argument is a pointer to the interface which the user wants to provide. +// The second argument is the actual variable value whose type implements the +// interface. +// +// Example: +// +// var MySet = wire.NewSet(wire.InterfaceValue(new(io.Reader), os.Stdin)) +func InterfaceValue(typ interface{}, x interface{}) ProvidedValue { + return ProvidedValue{} +} + +// A StructProvider represents a named struct. +type StructProvider struct{} + +// Struct specifies that the given struct type will be provided by filling in +// the fields in the struct that have the names given. +// +// The first argument must be a pointer to the struct type. For a struct type +// Foo, Wire will use field-filling to provide both Foo and *Foo. The remaining +// arguments are field names to fill in. As a special case, if a single name "*" +// is given, then all of the fields in the struct will be filled in. +// +// For example: +// +// type S struct { +// MyFoo *Foo +// MyBar *Bar +// } +// var Set = wire.NewSet(wire.Struct(new(S), "MyFoo")) -> inject only S.MyFoo +// var Set = wire.NewSet(wire.Struct(new(S), "*")) -> inject all fields +func Struct(structType interface{}, fieldNames ...string) StructProvider { + return StructProvider{} +} + +// StructFields is a collection of the fields from a struct. +type StructFields struct{} + +// FieldsOf declares that the named fields of the given struct type will be used +// to provide the types of those fields.
The structType argument must be a +// pointer to the struct or a pointer to a pointer to the struct it wishes to reference. +// +// The following example would provide Foo and Bar using S.MyFoo and S.MyBar respectively: +// +// type S struct { +// MyFoo Foo +// MyBar Bar +// } +// +// func NewStruct() S { /* ... */ } +// var Set = wire.NewSet(wire.FieldsOf(new(S), "MyFoo", "MyBar")) +// +// or +// +// func NewStruct() *S { /* ... */ } +// var Set = wire.NewSet(wire.FieldsOf(new(*S), "MyFoo", "MyBar")) +// +// If the structType argument is a pointer to a pointer to a struct, then FieldsOf +// additionally provides a pointer to each field type (e.g., *Foo and *Bar in the +// example above). +func FieldsOf(structType interface{}, fieldNames ...string) StructFields { + return StructFields{} +} diff --git a/vendor/github.com/googleapis/gax-go/.gitignore b/vendor/github.com/googleapis/gax-go/.gitignore new file mode 100644 index 00000000000..289bf1eb79f --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/.gitignore @@ -0,0 +1 @@ +*.cover diff --git a/vendor/github.com/googleapis/gax-go/.travis.yml b/vendor/github.com/googleapis/gax-go/.travis.yml new file mode 100644 index 00000000000..cc0a91e1536 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/.travis.yml @@ -0,0 +1,12 @@ +sudo: false +language: go +go: + - 1.9.x + - 1.10.x + - 1.11.x +script: + - gofmt -l . + - go tool vet . + - go test -coverprofile=coverage.txt -covermode=atomic +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md b/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..46b2a08ea6d --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
+ +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md new file mode 100644 index 00000000000..2827b7d3fa2 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md @@ -0,0 +1,27 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement] +(https://cla.developers.google.com/about/google-individual) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes, even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. We also +need to be sure of various other things—for instance that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use Github pull requests for this purpose. + +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the +[Software Grant and Corporate Contributor License Agreement] +(https://cla.developers.google.com/about/google-corporate). diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 00000000000..6d16b6578a2 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md new file mode 100644 index 00000000000..d6e214efd97 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/README.md @@ -0,0 +1,29 @@ +Google API Extensions for Go +============================ + +[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go) +[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go) +[![GoDoc](https://godoc.org/github.com/googleapis/gax-go?status.svg)](https://godoc.org/github.com/googleapis/gax-go) + +Google API Extensions for Go (gax-go) is a set of modules which aids the +development of APIs for clients and servers based on `gRPC` and Google API +conventions. + +To install the API extensions, use: + +``` +go get -u github.com/googleapis/gax-go +``` + +**Note:** Application code will rarely need to use this library directly, +but the code generated automatically from API definition files can use it +to simplify code generation and to provide a more convenient and idiomatic API surface. + +Go Versions +=========== +This library requires Go 1.6 or above. + +License +======= +BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE) +for more information. diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go new file mode 100644 index 00000000000..7b621643e94 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/call_option.go @@ -0,0 +1,157 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "math/rand" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retried and how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. + Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// Backoff implements exponential backoff. +// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff struct { + // Initial is the initial value of the retry envelope, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry envelope, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry envelope increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between zero and the current max. It might seem counterintuitive to + // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html + // argues that that is the best strategy.
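+ // For example, with the defaults (Initial 1s, Multiplier 2, Max 30s), successive pauses are drawn uniformly from [0, 1s), [0, 2s), [0, 4s), and so on, capped at [0, 30s).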
+ d := time.Duration(rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go new file mode 100644 index 00000000000..8b2900e71bf --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/gax.go @@ -0,0 +1,38 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +const Version = "2.0.0" diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go new file mode 100644 index 00000000000..d81455eccd9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/header.go @@ -0,0 +1,24 @@ +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. 
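+// For example, XGoogHeader("k1", "v1", "k2", "v2") returns "k1/v1 k2/v2".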
+func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go new file mode 100644 index 00000000000..cb5cd2a9627 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/invoke.go @@ -0,0 +1,89 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "time" +) + +// A user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. 
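+// It loops until the call succeeds, the Retryer reports err as non-retryable, or the sleep is interrupted by ctx cancellation.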
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + for { + err := call(ctx, settings) + if err == nil { + return nil + } + if settings.Retry == nil { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 00000000000..6b0b1270ff0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 00000000000..ee9783d232f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,4 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic +extensions. diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 00000000000..1bfe9612194 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,49 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + yaml "gopkg.in/yaml.v3" +) + +// Context contains state of the compiler as it traverses a document. 
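+// Contexts are linked through Parent; Description joins the Names along this chain into a dotted path (e.g., "a.b.c").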
+type Context struct { + Parent *Context + Name string + Node *yaml.Node + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, node *yaml.Node, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + name := context.Name + if context.Parent != nil { + name = context.Parent.Description() + "." + name + } + return name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 00000000000..6f40515d6b6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,70 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import "fmt" + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +func (err *Error) locationDescription() string { + if err.Context.Node != nil { + return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description()) + } + return err.Context.Description() +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return err.Message + } + return err.locationDescription() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/googleapis/gnostic/compiler/extensions.go new file mode 100644 index 00000000000..20848a0a163 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extensions.go @@ -0,0 +1,85 @@ +// Copyright 2017 Google LLC. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + extensions "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v3" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// CallExtension calls a binary extension handler. +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { + if context == nil || context.ExtensionHandlers == nil { + return false, nil, nil + } + handled = false + for _, handler := range *(context.ExtensionHandlers) { + response, err = handler.handle(in, extensionName) + if response == nil { + continue + } else { + handled = true + break + } + } + return handled, response, err +} + +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + yamlData, _ := yaml.Marshal(in) + request := &extensions.ExtensionHandlerRequest{ + CompilerVersion: &extensions.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + Wrapper: &extensions.Wrapper{ + Version: "unknown", // TODO: set this to the type/version of spec being parsed. + Yaml: string(yamlData), + ExtensionName: extensionName, + }, + } + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + if err != nil { + return nil, err + } + response := &extensions.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil || !response.Handled { + return nil, err + } + if len(response.Errors) != 0 { + return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 00000000000..48f02f3950e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,396 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compiler + +import ( + "fmt" + "regexp" + "sort" + "strconv" + + "github.com/googleapis/gnostic/jsonschema" + "gopkg.in/yaml.v3" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a *yaml.Node if possible. +func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { + if in == nil { + return nil, false + } + return in, true +} + +// SortedKeysForMap returns the sorted keys of a yaml mapping node. +func SortedKeysForMap(m *yaml.Node) []string { + keys := make([]string, 0) + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + keys = append(keys, m.Content[i].Value) + } + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml mapping node contains a specified key. +func MapHasKey(m *yaml.Node, key string) bool { + if m == nil { + return false + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return true + } + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m *yaml.Node, key string) *yaml.Node { + if m == nil { + return nil + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return m.Content[i+1] + } + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// SequenceNodeForNode returns a node if it is a SequenceNode. +func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { + if node.Kind != yaml.SequenceNode { + return nil, false + } + return node, true +} + +// BoolForScalarNode returns the bool value of a node. +func BoolForScalarNode(node *yaml.Node) (bool, bool) { + if node == nil { + return false, false + } + if node.Kind == yaml.DocumentNode { + return BoolForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return false, false + } + if node.Tag != "!!bool" { + return false, false + } + v, err := strconv.ParseBool(node.Value) + if err != nil { + return false, false + } + return v, true +} + +// IntForScalarNode returns the integer value of a node. +func IntForScalarNode(node *yaml.Node) (int64, bool) { + if node == nil { + return 0, false + } + if node.Kind == yaml.DocumentNode { + return IntForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0, false + } + if node.Tag != "!!int" { + return 0, false + } + v, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +// FloatForScalarNode returns the float value of a node. +func FloatForScalarNode(node *yaml.Node) (float64, bool) { + if node == nil { + return 0.0, false + } + if node.Kind == yaml.DocumentNode { + return FloatForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0.0, false + } + if (node.Tag != "!!int") && (node.Tag != "!!float") { + return 0.0, false + } + v, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return 0.0, false + } + return v, true +} + +// StringForScalarNode returns the string value of a node.
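+// Scalars tagged !!int, !!str, and !!timestamp are returned verbatim; !!null yields the empty string.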
+func StringForScalarNode(node *yaml.Node) (string, bool) { + if node == nil { + return "", false + } + if node.Kind == yaml.DocumentNode { + return StringForScalarNode(node.Content[0]) + } + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!int": + return node.Value, true + case "!!str": + return node.Value, true + case "!!timestamp": + return node.Value, true + case "!!null": + return "", true + default: + return "", false + } + default: + return "", false + } +} + +// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. +func StringArrayForSequenceNode(node *yaml.Node) []string { + stringArray := make([]string, 0) + for _, item := range node.Content { + v, ok := StringForScalarNode(item) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + if m == nil || m.Kind != yaml.MappingNode { + return invalidKeys + } + for i := 0; i < len(m.Content); i += 2 { + key := m.Content[i].Value + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + return invalidKeys +} + +// NewNullNode creates a new Null node. +func NewNullNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!null", + } + return node +} + +// NewMappingNode creates a new Mapping node. +func NewMappingNode() *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: make([]*yaml.Node, 0), + } +} + +// NewSequenceNode creates a new Sequence node. +func NewSequenceNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + return node +} + +// NewScalarNodeForString creates a new node to hold a string. +func NewScalarNodeForString(s string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: s, + } +} + +// NewSequenceNodeForStringArray creates a new node to hold an array of strings. +func NewSequenceNodeForStringArray(strings []string) *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + for _, s := range strings { + node.Content = append(node.Content, NewScalarNodeForString(s)) + } + return node +} + +// NewScalarNodeForBool creates a new node to hold a bool. +func NewScalarNodeForBool(b bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", b), + } +} + +// NewScalarNodeForFloat creates a new node to hold a float. +func NewScalarNodeForFloat(f float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%g", f), + } +} + +// NewScalarNodeForInt creates a new node to hold an integer. 
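+// For example, NewScalarNodeForInt(42) yields a scalar node with Tag "!!int" and Value "42".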
+func NewScalarNodeForInt(i int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", i), + } +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. +func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} + +// Description returns a human-readable representation of an item. +func Description(item interface{}) string { + value, ok := item.(*yaml.Node) + if ok { + return jsonschema.Render(value) + } + return fmt.Sprintf("%+v", item) +} + +// Display returns a description of a node for use in error messages. +func Display(node *yaml.Node) string { + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!str": + return fmt.Sprintf("%s (string)", node.Value) + } + } + return fmt.Sprintf("%+v (%T)", node, node) +} + +// Marshal creates a yaml version of a structure in our preferred style +func Marshal(in *yaml.Node) []byte { + clearStyle(in) + //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) + bytes, _ := yaml.Marshal(in) + + return bytes +} + +func clearStyle(node *yaml.Node) { + node.Style = 0 + for _, c := range node.Content { + clearStyle(c) + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 00000000000..ce9fcc456cc --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 00000000000..be0e8b40c8c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,307 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" + "sync" + + yaml "gopkg.in/yaml.v3" +) + +var verboseReader = false + +var fileCache map[string][]byte +var infoCache map[string]*yaml.Node + +var fileCacheEnable = true +var infoCacheEnable = true + +// These locks are used to synchronize accesses to the fileCache and infoCache +// maps (above). They are global state and can throw thread-related errors +// when modified from separate goroutines. The general strategy is to protect +// all public functions in this file with mutex Lock() calls. As a result, to +// avoid deadlock, these public functions should not call other public +// functions, so some public functions have private equivalents. +// In the future, we might consider replacing the maps with sync.Map and +// eliminating these mutexes. +var fileCacheMutex sync.Mutex +var infoCacheMutex sync.Mutex + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]*yaml.Node, 0) + } +} + +// EnableFileCache turns on file caching. +func EnableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = true +} + +// EnableInfoCache turns on parsed info caching. +func EnableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = true +} + +// DisableFileCache turns off file caching. +func DisableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = false +} + +// DisableInfoCache turns off parsed info caching. +func DisableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = false +} + +// RemoveFromFileCache removes an entry from the file cache. +func RemoveFromFileCache(fileurl string) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + if !fileCacheEnable { + return + } + initializeFileCache() + delete(fileCache, fileurl) +} + +// RemoveFromInfoCache removes an entry from the info cache. +func RemoveFromInfoCache(filename string) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + if !infoCacheEnable { + return + } + initializeInfoCache() + delete(infoCache, filename) +} + +// GetInfoCache returns the info cache map. +func GetInfoCache() map[string]*yaml.Node { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + if infoCache == nil { + initializeInfoCache() + } + return infoCache +} + +// ClearFileCache clears the file cache. +func ClearFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCache = make(map[string][]byte, 0) +} + +// ClearInfoCache clears the info cache. +func ClearInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCache = make(map[string]*yaml.Node) +} + +// ClearCaches clears all caches. +func ClearCaches() { + ClearFileCache() + ClearInfoCache() +} + +// FetchFile gets a specified file from the local filesystem or a remote location. 
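+// Note: the file is fetched over HTTP; local paths are handled by ReadBytesForFile. Fetched bytes are cached in fileCache when the file cache is enabled.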
+func FetchFile(fileurl string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return fetchFile(fileurl) +} + +func fetchFile(fileurl string) ([]byte, error) { + var bytes []byte + initializeFileCache() + if fileCacheEnable { + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode != 200 { + return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) + } + bytes, err = ioutil.ReadAll(response.Body) + if fileCacheEnable && err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return readBytesForFile(filename) +} + +func readBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := fetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a *yaml.Node. +func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + return readInfoFromBytes(filename, bytes) +} + +func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + initializeInfoCache() + if infoCacheEnable { + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + } + var info yaml.Node + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + if infoCacheEnable && len(filename) > 0 { + infoCache[filename] = &info + } + return &info, nil +} + +// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
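+// A ref has the form "file#/path/to/node"; when the file part is empty, basefile is used.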
+func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + initializeInfoCache() + if infoCacheEnable { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + } + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = parts[0] + if _, err := url.ParseRequestURI(parts[0]); err != nil { + // It is not a URL, so the file is local + filename = basedir + parts[0] + } + } else { + filename = basefile + } + bytes, err := readBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := readInfoFromBytes(filename, bytes) + if info != nil && info.Kind == yaml.DocumentNode { + info = info.Content[0] + } + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if info == nil { + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m := info + if true { + found := false + for i := 0; i < len(m.Content); i += 2 { + if m.Content[i].Value == key { + info = m.Content[i+1] + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + if infoCacheEnable { + infoCache[ref] = info + } + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 00000000000..4b5d63e5856 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,13 @@ +# Extensions + +**Extension Support is experimental.** + +This directory contains support code for building Gnostic extension handlers and +associated examples. + +Extension handlers can be used to compile vendor or specification extensions +into protocol buffer structures. + +Like plugins, extension handlers are built as separate executables. Extension +bodies are written to extension handlers as serialized +ExtensionHandlerRequests. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 00000000000..5aab58ebfbe --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,461 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.15.5 +// source: extensions/extension.proto + +package gnostic_extension_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The version number of Gnostic. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil { + return x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil { + return x.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension to process. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` + // The version number of Gnostic. 
+ CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` +} + +func (x *ExtensionHandlerRequest) Reset() { + *x = ExtensionHandlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionHandlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionHandlerRequest) ProtoMessage() {} + +func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if x != nil { + return x.Wrapper + } + return nil +} + +func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` + // Error message(s). If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` + // text output + Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ExtensionHandlerResponse) Reset() { + *x = ExtensionHandlerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionHandlerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionHandlerResponse) ProtoMessage() {} + +func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. 
+func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{2} +} + +func (x *ExtensionHandlerResponse) GetHandled() bool { + if x != nil { + return x.Handled + } + return false +} + +func (x *ExtensionHandlerResponse) GetErrors() []string { + if x != nil { + return x.Errors + } + return nil +} + +func (x *ExtensionHandlerResponse) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +type Wrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // version of the OpenAPI specification in which this extension was written. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the extension. + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` + // YAML-formatted extension value. + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Wrapper) Reset() { + *x = Wrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Wrapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Wrapper) ProtoMessage() {} + +func (x *Wrapper) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. +func (*Wrapper) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{3} +} + +func (x *Wrapper) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Wrapper) GetExtensionName() string { + if x != nil { + return x.ExtensionName + } + return "" +} + +func (x *Wrapper) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +var File_extensions_extension_proto protoreflect.FileDescriptor + +var file_extensions_extension_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, + 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_extensions_extension_proto_rawDescOnce sync.Once + file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc +) + +func file_extensions_extension_proto_rawDescGZIP() []byte { + file_extensions_extension_proto_rawDescOnce.Do(func() { + file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) + }) + return file_extensions_extension_proto_rawDescData +} + +var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_extensions_extension_proto_goTypes = []interface{}{ + (*Version)(nil), // 0: gnostic.extension.v1.Version + (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest + (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse + (*Wrapper)(nil), // 3: 
gnostic.extension.v1.Wrapper + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_extensions_extension_proto_depIdxs = []int32{ + 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper + 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version + 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_extensions_extension_proto_init() } +func file_extensions_extension_proto_init() { + if File_extensions_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Wrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_extensions_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_extensions_extension_proto_goTypes, + DependencyIndexes: file_extensions_extension_proto_depIdxs, + MessageInfos: file_extensions_extension_proto_msgTypes, + }.Build() + File_extensions_extension_proto = out.File + file_extensions_extension_proto_rawDesc = nil + file_extensions_extension_proto_goTypes = nil + file_extensions_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 00000000000..875137c1a86 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,97 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package gnostic.extension.v1; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "GnosticExtension"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.gnostic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +// "Gnostic Extension" +option objc_class_prefix = "GNX"; + +// The Go package name. +option go_package = "./extensions;gnostic_extension_v1"; + +// The version number of Gnostic. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The extension to process. + Wrapper wrapper = 1; + + // The version number of Gnostic. + Version compiler_version = 2; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message(s). If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + repeated string errors = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension. + string extension_name = 2; + + // YAML-formatted extension value. + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 00000000000..ec8afd00923 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,64 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gnostic_extension_v1 + +import ( + "io/ioutil" + "log" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +// Main implements the main program of an extension handler. +func Main(handler extensionHandler) { + // unpack the request + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + log.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + log.Println("Input error:", err.Error()) + os.Exit(1) + } + // call the handler + handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) + // respond with the output of the handler + response := &ExtensionHandlerResponse{ + Handled: false, // default assumption + Errors: make([]string, 0), + } + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } else if handled { + response.Handled = true + response.Value, err = ptypes.MarshalAny(output) + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } + } + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/googleapis/gnostic/jsonschema/README.md new file mode 100644 index 00000000000..6793c5179c8 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/README.md @@ -0,0 +1,4 @@ +# jsonschema + +This directory contains code for reading, writing, and manipulating JSON +schemas. diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/googleapis/gnostic/jsonschema/base.go new file mode 100644 index 00000000000..0af8b148b9c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/base.go @@ -0,0 +1,84 @@ + +// THIS FILE IS AUTOMATICALLY GENERATED. 
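+// It embeds the JSON Schema draft-04 meta-schema (the contents of schema.json
+// in this directory) as a base64-encoded string that baseSchemaBytes decodes
+// at runtime. A sketch of how it is consumed (this mirrors what NewBaseSchema
+// in reader.go does):
+//
+//	b, err := baseSchemaBytes() // raw meta-schema JSON
+//	if err == nil {
+//		var node yaml.Node
+//		_ = yaml.Unmarshal(b, &node)
+//		metaSchema := NewSchemaFromObject(&node)
+//		_ = metaSchema
+//	}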
+ +package jsonschema + +import ( + "encoding/base64" +) + +func baseSchemaBytes() ([]byte, error){ + return base64.StdEncoding.DecodeString( +`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi +JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl +c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK +ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg +ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp +bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp +dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl +ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK +ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v +bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg +ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki +LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p +bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s +CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog +ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg +ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog +ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg +ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog +ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 +IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog +ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 +ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl +ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw +ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg +ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg +ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg +ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg +IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 +aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg +ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg +ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg +ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK +ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi +ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP +ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy +ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg +ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv +ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi +OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl +SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
+dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k +ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk +cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl +cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh +ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg +ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk +ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk +ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 +IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi +b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 +LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl +cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv +bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog +ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq +ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg +ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg +ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg +ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg +ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu +aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh +bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl +cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs +CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs +ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg +ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg +ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy +YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 +IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg +fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 +IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/googleapis/gnostic/jsonschema/display.go new file mode 100644 index 00000000000..028a760a91b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/display.go @@ -0,0 +1,229 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonschema + +import ( + "fmt" + "strings" +) + +// +// DISPLAY +// The following methods display Schemas. +// + +// Description returns a string representation of a string or string array. +func (s *StringOrStringArray) Description() string { + if s.String != nil { + return *s.String + } + if s.StringArray != nil { + return strings.Join(*s.StringArray, ", ") + } + return "" +} + +// Returns a string representation of a Schema. +func (schema *Schema) String() string { + return schema.describeSchema("") +} + +// Helper: Returns a string representation of a Schema indented by a specified string. +func (schema *Schema) describeSchema(indent string) string { + result := "" + if schema.Schema != nil { + result += indent + "$schema: " + *(schema.Schema) + "\n" + } + if schema.ID != nil { + result += indent + "id: " + *(schema.ID) + "\n" + } + if schema.MultipleOf != nil { + result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) + } + if schema.Maximum != nil { + result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) + } + if schema.ExclusiveMaximum != nil { + result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) + } + if schema.ExclusiveMinimum != nil { + result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) + } + if schema.MinLength != nil { + result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) + } + if schema.Pattern != nil { + result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) + } + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + result += indent + "additionalItems:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalItems.Boolean) + result += indent + fmt.Sprintf("additionalItems: %+v\n", b) + } + } + if schema.Items != nil { + result += indent + "items:\n" + items := schema.Items + if items.SchemaArray != nil { + for i, s := range *(items.SchemaArray) { + result += indent + " " + fmt.Sprintf("%d", i) + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } else if items.Schema != nil { + result += items.Schema.describeSchema(indent + " " + " ") + } + } + if schema.MaxItems != nil { + result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) + } + if schema.MinItems != nil { + result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) + } + if schema.UniqueItems != nil { + result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) + } + if schema.MaxProperties != nil { + result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) + } + if schema.MinProperties != nil { + result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) + } + if schema.Required != nil { + result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) + } + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + result += indent + "additionalProperties:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalProperties.Boolean) + result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) + } + } + if schema.Properties != nil { + result += indent + "properties:\n" + for _, pair := range *(schema.Properties) { + name := pair.Name 
+ s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.PatternProperties != nil { + result += indent + "patternProperties:\n" + for _, pair := range *(schema.PatternProperties) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Dependencies != nil { + result += indent + "dependencies:\n" + for _, pair := range *(schema.Dependencies) { + name := pair.Name + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } else { + a := schemaOrStringArray.StringArray + if a != nil { + result += indent + " " + name + ":\n" + for _, s2 := range *a { + result += indent + " " + " " + s2 + "\n" + } + } + } + + } + } + if schema.Enumeration != nil { + result += indent + "enumeration:\n" + for _, value := range *(schema.Enumeration) { + if value.String != nil { + result += indent + " " + fmt.Sprintf("%+v\n", *value.String) + } else { + result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) + } + } + } + if schema.Type != nil { + result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) + } + if schema.AllOf != nil { + result += indent + "allOf:\n" + for _, s := range *(schema.AllOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.AnyOf != nil { + result += indent + "anyOf:\n" + for _, s := range *(schema.AnyOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.OneOf != nil { + result += indent + "oneOf:\n" + for _, s := range *(schema.OneOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.Not != nil { + result += indent + "not:\n" + result += schema.Not.describeSchema(indent + " ") + } + if schema.Definitions != nil { + result += indent + "definitions:\n" + for _, pair := range *(schema.Definitions) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Title != nil { + result += indent + "title: " + *(schema.Title) + "\n" + } + if schema.Description != nil { + result += indent + "description: " + *(schema.Description) + "\n" + } + if schema.Default != nil { + result += indent + "default:\n" + result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) + } + if schema.Format != nil { + result += indent + "format: " + *(schema.Format) + "\n" + } + if schema.Ref != nil { + result += indent + "$ref: " + *(schema.Ref) + "\n" + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/googleapis/gnostic/jsonschema/models.go new file mode 100644 index 00000000000..4781bdc5f50 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/models.go @@ -0,0 +1,228 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonschema supports the reading, writing, and manipulation +// of JSON Schemas. +package jsonschema + +import "gopkg.in/yaml.v3" + +// The Schema struct models a JSON Schema and, because schemas are +// defined hierarchically, contains many references to itself. +// All fields are pointers and are nil if the associated values +// are not specified. +type Schema struct { + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + + // http://json-schema.org/latest/json-schema-validation.html + // 5.1. Validation keywords for numeric instances (number and integer) + MultipleOf *SchemaNumber + Maximum *SchemaNumber + ExclusiveMaximum *bool + Minimum *SchemaNumber + ExclusiveMinimum *bool + + // 5.2. Validation keywords for strings + MaxLength *int64 + MinLength *int64 + Pattern *string + + // 5.3. Validation keywords for arrays + AdditionalItems *SchemaOrBoolean + Items *SchemaOrSchemaArray + MaxItems *int64 + MinItems *int64 + UniqueItems *bool + + // 5.4. Validation keywords for objects + MaxProperties *int64 + MinProperties *int64 + Required *[]string + AdditionalProperties *SchemaOrBoolean + Properties *[]*NamedSchema + PatternProperties *[]*NamedSchema + Dependencies *[]*NamedSchemaOrStringArray + + // 5.5. Validation keywords for any instance type + Enumeration *[]SchemaEnumValue + Type *StringOrStringArray + AllOf *[]*Schema + AnyOf *[]*Schema + OneOf *[]*Schema + Not *Schema + Definitions *[]*NamedSchema + + // 6. Metadata keywords + Title *string + Description *string + Default *yaml.Node + + // 7. Semantic validation with "format" + Format *string +} + +// These helper structs represent "combination" types that generally can +// have values of one type or another. All are used to represent parts +// of Schemas. + +// SchemaNumber represents a value that can be either an Integer or a Float. +type SchemaNumber struct { + Integer *int64 + Float *float64 +} + +// NewSchemaNumberWithInteger creates and returns a new object +func NewSchemaNumberWithInteger(i int64) *SchemaNumber { + result := &SchemaNumber{} + result.Integer = &i + return result +} + +// NewSchemaNumberWithFloat creates and returns a new object +func NewSchemaNumberWithFloat(f float64) *SchemaNumber { + result := &SchemaNumber{} + result.Float = &f + return result +} + +// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. +type SchemaOrBoolean struct { + Schema *Schema + Boolean *bool +} + +// NewSchemaOrBooleanWithSchema creates and returns a new object +func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Schema = s + return result +} + +// NewSchemaOrBooleanWithBoolean creates and returns a new object +func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Boolean = &b + return result +} + +// StringOrStringArray represents a value that can be either +// a String or an Array of Strings. 
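+// Description in display.go renders either form as a single string (a string
+// array is joined with ", ").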
+type StringOrStringArray struct { + String *string + StringArray *[]string +} + +// NewStringOrStringArrayWithString creates and returns a new object +func NewStringOrStringArrayWithString(s string) *StringOrStringArray { + result := &StringOrStringArray{} + result.String = &s + return result +} + +// NewStringOrStringArrayWithStringArray creates and returns a new object +func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { + result := &StringOrStringArray{} + result.StringArray = &a + return result +} + +// SchemaOrStringArray represents a value that can be either +// a Schema or an Array of Strings. +type SchemaOrStringArray struct { + Schema *Schema + StringArray *[]string +} + +// SchemaOrSchemaArray represents a value that can be either +// a Schema or an Array of Schemas. +type SchemaOrSchemaArray struct { + Schema *Schema + SchemaArray *[]*Schema +} + +// NewSchemaOrSchemaArrayWithSchema creates and returns a new object +func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.Schema = s + return result +} + +// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object +func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.SchemaArray = &a + return result +} + +// SchemaEnumValue represents a value that can be part of an +// enumeration in a Schema. +type SchemaEnumValue struct { + String *string + Bool *bool +} + +// NamedSchema is a name-value pair that is used to emulate maps +// with ordered keys. +type NamedSchema struct { + Name string + Value *Schema +} + +// NewNamedSchema creates and returns a new object +func NewNamedSchema(name string, value *Schema) *NamedSchema { + return &NamedSchema{Name: name, Value: value} +} + +// NamedSchemaOrStringArray is a name-value pair that is used +// to emulate maps with ordered keys. +type NamedSchemaOrStringArray struct { + Name string + Value *SchemaOrStringArray +} + +// Access named subschemas by name + +func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { + if array == nil { + return nil + } + for _, pair := range *array { + if pair.Name == name { + return pair.Value + } + } + return nil +} + +// PropertyWithName returns the selected element. +func (s *Schema) PropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Properties, name) +} + +// PatternPropertyWithName returns the selected element. +func (s *Schema) PatternPropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.PatternProperties, name) +} + +// DefinitionWithName returns the selected element. +func (s *Schema) DefinitionWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Definitions, name) +} + +// AddProperty adds a named property. +func (s *Schema) AddProperty(name string, property *Schema) { + *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go new file mode 100644 index 00000000000..ba8dd4a91b9 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go @@ -0,0 +1,394 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + "log" + "strings" +) + +// +// OPERATIONS +// The following methods perform operations on Schemas. +// + +// IsEmpty returns true if no members of the Schema are specified. +func (schema *Schema) IsEmpty() bool { + return (schema.Schema == nil) && + (schema.ID == nil) && + (schema.MultipleOf == nil) && + (schema.Maximum == nil) && + (schema.ExclusiveMaximum == nil) && + (schema.Minimum == nil) && + (schema.ExclusiveMinimum == nil) && + (schema.MaxLength == nil) && + (schema.MinLength == nil) && + (schema.Pattern == nil) && + (schema.AdditionalItems == nil) && + (schema.Items == nil) && + (schema.MaxItems == nil) && + (schema.MinItems == nil) && + (schema.UniqueItems == nil) && + (schema.MaxProperties == nil) && + (schema.MinProperties == nil) && + (schema.Required == nil) && + (schema.AdditionalProperties == nil) && + (schema.Properties == nil) && + (schema.PatternProperties == nil) && + (schema.Dependencies == nil) && + (schema.Enumeration == nil) && + (schema.Type == nil) && + (schema.AllOf == nil) && + (schema.AnyOf == nil) && + (schema.OneOf == nil) && + (schema.Not == nil) && + (schema.Definitions == nil) && + (schema.Title == nil) && + (schema.Description == nil) && + (schema.Default == nil) && + (schema.Format == nil) && + (schema.Ref == nil) +} + +// IsEqual returns true if two schemas are equal. +func (schema *Schema) IsEqual(schema2 *Schema) bool { + return schema.String() == schema2.String() +} + +// SchemaOperation represents a function that can be applied to a Schema. +type SchemaOperation func(schema *Schema, context string) + +// Applies a specified function to a Schema and all of the Schemas that it contains. 
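+// The operation is applied to contained schemas first and to the receiver
+// last. A sketch of a hypothetical operation that counts subschemas (the
+// variable names are illustrative only):
+//
+//	count := 0
+//	schema.applyToSchemas(func(s *Schema, context string) {
+//		count++
+//	}, "")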
+func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
+
+	if schema.AdditionalItems != nil {
+		s := schema.AdditionalItems.Schema
+		if s != nil {
+			s.applyToSchemas(operation, "AdditionalItems")
+		}
+	}
+
+	if schema.Items != nil {
+		if schema.Items.SchemaArray != nil {
+			for _, s := range *(schema.Items.SchemaArray) {
+				s.applyToSchemas(operation, "Items.SchemaArray")
+			}
+		} else if schema.Items.Schema != nil {
+			schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
+		}
+	}
+
+	if schema.AdditionalProperties != nil {
+		s := schema.AdditionalProperties.Schema
+		if s != nil {
+			s.applyToSchemas(operation, "AdditionalProperties")
+		}
+	}
+
+	if schema.Properties != nil {
+		for _, pair := range *(schema.Properties) {
+			s := pair.Value
+			s.applyToSchemas(operation, "Properties")
+		}
+	}
+	if schema.PatternProperties != nil {
+		for _, pair := range *(schema.PatternProperties) {
+			s := pair.Value
+			s.applyToSchemas(operation, "PatternProperties")
+		}
+	}
+
+	if schema.Dependencies != nil {
+		for _, pair := range *(schema.Dependencies) {
+			schemaOrStringArray := pair.Value
+			s := schemaOrStringArray.Schema
+			if s != nil {
+				s.applyToSchemas(operation, "Dependencies")
+			}
+		}
+	}
+
+	if schema.AllOf != nil {
+		for _, s := range *(schema.AllOf) {
+			s.applyToSchemas(operation, "AllOf")
+		}
+	}
+	if schema.AnyOf != nil {
+		for _, s := range *(schema.AnyOf) {
+			s.applyToSchemas(operation, "AnyOf")
+		}
+	}
+	if schema.OneOf != nil {
+		for _, s := range *(schema.OneOf) {
+			s.applyToSchemas(operation, "OneOf")
+		}
+	}
+	if schema.Not != nil {
+		schema.Not.applyToSchemas(operation, "Not")
+	}
+
+	if schema.Definitions != nil {
+		for _, pair := range *(schema.Definitions) {
+			s := pair.Value
+			s.applyToSchemas(operation, "Definitions")
+		}
+	}
+
+	operation(schema, context)
+}
+
+// CopyProperties copies all non-nil properties from the source Schema to the destination Schema (the receiver).
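+// A minimal sketch using the package's stringptr helper and two hypothetical
+// schemas (non-nil source fields overwrite the receiver's):
+//
+//	a := &Schema{Title: stringptr("a")}
+//	b := &Schema{Title: stringptr("b"), Format: stringptr("uri")}
+//	a.CopyProperties(b) // a.Title == "b", a.Format == "uri"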
+func (schema *Schema) CopyProperties(source *Schema) { + if source.Schema != nil { + schema.Schema = source.Schema + } + if source.ID != nil { + schema.ID = source.ID + } + if source.MultipleOf != nil { + schema.MultipleOf = source.MultipleOf + } + if source.Maximum != nil { + schema.Maximum = source.Maximum + } + if source.ExclusiveMaximum != nil { + schema.ExclusiveMaximum = source.ExclusiveMaximum + } + if source.Minimum != nil { + schema.Minimum = source.Minimum + } + if source.ExclusiveMinimum != nil { + schema.ExclusiveMinimum = source.ExclusiveMinimum + } + if source.MaxLength != nil { + schema.MaxLength = source.MaxLength + } + if source.MinLength != nil { + schema.MinLength = source.MinLength + } + if source.Pattern != nil { + schema.Pattern = source.Pattern + } + if source.AdditionalItems != nil { + schema.AdditionalItems = source.AdditionalItems + } + if source.Items != nil { + schema.Items = source.Items + } + if source.MaxItems != nil { + schema.MaxItems = source.MaxItems + } + if source.MinItems != nil { + schema.MinItems = source.MinItems + } + if source.UniqueItems != nil { + schema.UniqueItems = source.UniqueItems + } + if source.MaxProperties != nil { + schema.MaxProperties = source.MaxProperties + } + if source.MinProperties != nil { + schema.MinProperties = source.MinProperties + } + if source.Required != nil { + schema.Required = source.Required + } + if source.AdditionalProperties != nil { + schema.AdditionalProperties = source.AdditionalProperties + } + if source.Properties != nil { + schema.Properties = source.Properties + } + if source.PatternProperties != nil { + schema.PatternProperties = source.PatternProperties + } + if source.Dependencies != nil { + schema.Dependencies = source.Dependencies + } + if source.Enumeration != nil { + schema.Enumeration = source.Enumeration + } + if source.Type != nil { + schema.Type = source.Type + } + if source.AllOf != nil { + schema.AllOf = source.AllOf + } + if source.AnyOf != nil { + schema.AnyOf = source.AnyOf + } + if source.OneOf != nil { + schema.OneOf = source.OneOf + } + if source.Not != nil { + schema.Not = source.Not + } + if source.Definitions != nil { + schema.Definitions = source.Definitions + } + if source.Title != nil { + schema.Title = source.Title + } + if source.Description != nil { + schema.Description = source.Description + } + if source.Default != nil { + schema.Default = source.Default + } + if source.Format != nil { + schema.Format = source.Format + } + if source.Ref != nil { + schema.Ref = source.Ref + } +} + +// TypeIs returns true if the Type of a Schema includes the specified type +func (schema *Schema) TypeIs(typeName string) bool { + if schema.Type != nil { + // the schema Type is either a string or an array of strings + if schema.Type.String != nil { + return (*(schema.Type.String) == typeName) + } else if schema.Type.StringArray != nil { + for _, n := range *(schema.Type.StringArray) { + if n == typeName { + return true + } + } + } + } + return false +} + +// ResolveRefs resolves "$ref" elements in a Schema and its children. +// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, +// the reference is kept and we expect downstream tools to separately model these +// referenced schemas. 
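+// An illustrative end-to-end pass over a freshly loaded schema (a sketch:
+// the ordering is not mandated by this package, and "sample.json" is a
+// hypothetical input file):
+//
+//	schema, err := NewSchemaFromFile("sample.json")
+//	if err == nil {
+//		schema.ResolveRefs()
+//		schema.ResolveAllOfs()
+//		schema.ResolveAnyOfs()
+//	}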
+func (schema *Schema) ResolveRefs() {
+	rootSchema := schema
+	count := 1
+	for count > 0 {
+		count = 0
+		schema.applyToSchemas(
+			func(schema *Schema, context string) {
+				if schema.Ref != nil {
+					resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
+					if err != nil {
+						log.Printf("%+v", err)
+					} else if resolvedRef.TypeIs("object") {
+						// don't substitute for objects, we'll model the referenced schema with a class
+					} else if context == "OneOf" {
+						// don't substitute for references inside oneOf declarations
+					} else if resolvedRef.OneOf != nil {
+						// don't substitute for references that contain oneOf declarations
+					} else if resolvedRef.AdditionalProperties != nil {
+						// don't substitute for references that look like objects
+					} else {
+						schema.Ref = nil
+						schema.CopyProperties(resolvedRef)
+						count++
+					}
+				}
+			}, "")
+	}
+}
+
+// resolveJSONPointer resolves JSON pointers.
+// The current implementation is very crude and custom for OpenAPI 2.0 schemas.
+// It returns an error for any pointer that it is unable to resolve.
+func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
+	parts := strings.Split(ref, "#")
+	if len(parts) == 2 {
+		documentName := parts[0] + "#"
+		if documentName == "#" && schema.ID != nil {
+			documentName = *(schema.ID)
+		}
+		path := parts[1]
+		document := schemas[documentName]
+		pathParts := strings.Split(path, "/")
+
+		// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
+		if len(pathParts) == 1 {
+			return document, nil
+		} else if len(pathParts) == 3 {
+			switch pathParts[1] {
+			case "definitions":
+				dictionary := document.Definitions
+				for _, pair := range *dictionary {
+					if pair.Name == pathParts[2] {
+						result = pair.Value
+					}
+				}
+			case "properties":
+				dictionary := document.Properties
+				for _, pair := range *dictionary {
+					if pair.Name == pathParts[2] {
+						result = pair.Value
+					}
+				}
+			default:
+				break
+			}
+		}
+	}
+	if result == nil {
+		return nil, fmt.Errorf("unresolved pointer: %+v", ref)
+	}
+	return result, nil
+}
+
+// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
+func (schema *Schema) ResolveAllOfs() {
+	schema.applyToSchemas(
+		func(schema *Schema, context string) {
+			if schema.AllOf != nil {
+				for _, allOf := range *(schema.AllOf) {
+					schema.CopyProperties(allOf)
+				}
+				schema.AllOf = nil
+			}
+		}, "resolveAllOfs")
+}
+
+// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
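+// For example, a schema with AnyOf = [a, b] ends up with OneOf = [a, b] and
+// AnyOf = nil.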
+func (schema *Schema) ResolveAnyOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AnyOf != nil { + schema.OneOf = schema.AnyOf + schema.AnyOf = nil + } + }, "resolveAnyOfs") +} + +// return a pointer to a copy of a passed-in string +func stringptr(input string) (output *string) { + return &input +} + +// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperty(name string) { + *schema.Properties = append(*schema.Properties, + NewNamedSchema(name, + &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) +} + +// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperties(names []string) { + for _, name := range names { + schema.CopyOfficialSchemaProperty(name) + } +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go new file mode 100644 index 00000000000..b8583d46602 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go @@ -0,0 +1,442 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go run generate-base.go + +package jsonschema + +import ( + "fmt" + "io/ioutil" + "strconv" + + "gopkg.in/yaml.v3" +) + +// This is a global map of all known Schemas. +// It is initialized when the first Schema is created and inserted. +var schemas map[string]*Schema + +// NewBaseSchema builds a schema object from an embedded json representation. +func NewBaseSchema() (schema *Schema, err error) { + b, err := baseSchemaBytes() + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(b, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromFile reads a schema from a file. +// Currently this assumes that schemas are stored in the source distribution of this project. +func NewSchemaFromFile(filename string) (schema *Schema, err error) { + file, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(file, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromObject constructs a schema from a parsed JSON object. +// Due to the complexity of the schema representation, this is a +// custom reader and not the standard Go JSON reader (encoding/json). 
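+// A small sketch of direct use (NewSchemaFromFile above follows the same
+// pattern; the input literal here is illustrative):
+//
+//	var node yaml.Node
+//	if err := yaml.Unmarshal([]byte("{\"type\": \"string\"}"), &node); err == nil {
+//		schema := NewSchemaFromObject(&node)
+//		_ = schema.TypeIs("string") // true
+//	}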
+func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
+	switch jsonData.Kind {
+	case yaml.DocumentNode:
+		return NewSchemaFromObject(jsonData.Content[0])
+	case yaml.MappingNode:
+		schema := &Schema{}
+
+		for i := 0; i < len(jsonData.Content); i += 2 {
+			k := jsonData.Content[i].Value
+			v := jsonData.Content[i+1]
+
+			switch k {
+			case "$schema":
+				schema.Schema = schema.stringValue(v)
+			case "id":
+				schema.ID = schema.stringValue(v)
+
+			case "multipleOf":
+				schema.MultipleOf = schema.numberValue(v)
+			case "maximum":
+				schema.Maximum = schema.numberValue(v)
+			case "exclusiveMaximum":
+				schema.ExclusiveMaximum = schema.boolValue(v)
+			case "minimum":
+				schema.Minimum = schema.numberValue(v)
+			case "exclusiveMinimum":
+				schema.ExclusiveMinimum = schema.boolValue(v)
+
+			case "maxLength":
+				schema.MaxLength = schema.intValue(v)
+			case "minLength":
+				schema.MinLength = schema.intValue(v)
+			case "pattern":
+				schema.Pattern = schema.stringValue(v)
+
+			case "additionalItems":
+				schema.AdditionalItems = schema.schemaOrBooleanValue(v)
+			case "items":
+				schema.Items = schema.schemaOrSchemaArrayValue(v)
+			case "maxItems":
+				schema.MaxItems = schema.intValue(v)
+			case "minItems":
+				schema.MinItems = schema.intValue(v)
+			case "uniqueItems":
+				schema.UniqueItems = schema.boolValue(v)
+
+			case "maxProperties":
+				schema.MaxProperties = schema.intValue(v)
+			case "minProperties":
+				schema.MinProperties = schema.intValue(v)
+			case "required":
+				schema.Required = schema.arrayOfStringsValue(v)
+			case "additionalProperties":
+				schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
+			case "properties":
+				schema.Properties = schema.mapOfSchemasValue(v)
+			case "patternProperties":
+				schema.PatternProperties = schema.mapOfSchemasValue(v)
+			case "dependencies":
+				schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
+
+			case "enum":
+				schema.Enumeration = schema.arrayOfEnumValuesValue(v)
+
+			case "type":
+				schema.Type = schema.stringOrStringArrayValue(v)
+			case "allOf":
+				schema.AllOf = schema.arrayOfSchemasValue(v)
+			case "anyOf":
+				schema.AnyOf = schema.arrayOfSchemasValue(v)
+			case "oneOf":
+				schema.OneOf = schema.arrayOfSchemasValue(v)
+			case "not":
+				schema.Not = NewSchemaFromObject(v)
+			case "definitions":
+				schema.Definitions = schema.mapOfSchemasValue(v)
+
+			case "title":
+				schema.Title = schema.stringValue(v)
+			case "description":
+				schema.Description = schema.stringValue(v)
+
+			case "default":
+				schema.Default = v
+
+			case "format":
+				schema.Format = schema.stringValue(v)
+			case "$ref":
+				schema.Ref = schema.stringValue(v)
+			default:
+				fmt.Printf("UNSUPPORTED (%s)\n", k)
+			}
+		}
+
+		// insert schema in global map
+		if schema.ID != nil {
+			if schemas == nil {
+				schemas = make(map[string]*Schema)
+			}
+			schemas[*(schema.ID)] = schema
+		}
+		return schema
+
+	default:
+		fmt.Printf("NewSchemaFromObject: unexpected node %+v\n", jsonData)
+		return nil
+	}
+}
+
+//
+// BUILDERS
+// The following methods build elements of Schemas from yaml.Node values.
+// Each returns nil if it is unable to build the desired element.
+//
+
+// Gets the string value of a yaml.Node if possible.
+func (schema *Schema) stringValue(v *yaml.Node) *string {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		return &v.Value
+	default:
+		fmt.Printf("stringValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets the numeric value of a yaml.Node if possible.
+func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
+	number := &SchemaNumber{}
+	switch v.Kind {
+	case yaml.ScalarNode:
+		switch v.Tag {
+		case "!!float":
+			v2, _ := strconv.ParseFloat(v.Value, 64)
+			number.Float = &v2
+			return number
+		case "!!int":
+			v2, _ := strconv.ParseInt(v.Value, 10, 64)
+			number.Integer = &v2
+			return number
+		default:
+			fmt.Printf("numberValue: unexpected node %+v\n", v)
+		}
+	default:
+		fmt.Printf("numberValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets the integer value of a yaml.Node if possible.
+func (schema *Schema) intValue(v *yaml.Node) *int64 {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		switch v.Tag {
+		case "!!float":
+			v2, _ := strconv.ParseFloat(v.Value, 64)
+			v3 := int64(v2)
+			return &v3
+		case "!!int":
+			v2, _ := strconv.ParseInt(v.Value, 10, 64)
+			return &v2
+		default:
+			fmt.Printf("intValue: unexpected node %+v\n", v)
+		}
+	default:
+		fmt.Printf("intValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets the bool value of a yaml.Node if possible.
+func (schema *Schema) boolValue(v *yaml.Node) *bool {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		switch v.Tag {
+		case "!!bool":
+			v2, _ := strconv.ParseBool(v.Value)
+			return &v2
+		default:
+			fmt.Printf("boolValue: unexpected node %+v\n", v)
+		}
+	default:
+		fmt.Printf("boolValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets a map of Schemas from a yaml.Node if possible.
+func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
+	switch v.Kind {
+	case yaml.MappingNode:
+		m := make([]*NamedSchema, 0)
+		for i := 0; i < len(v.Content); i += 2 {
+			k2 := v.Content[i].Value
+			v2 := v.Content[i+1]
+			pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
+			m = append(m, pair)
+		}
+		return &m
+	default:
+		fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets an array of Schemas from a yaml.Node if possible.
+func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
+	switch v.Kind {
+	case yaml.SequenceNode:
+		m := make([]*Schema, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.MappingNode:
+				s := NewSchemaFromObject(v2)
+				m = append(m, s)
+			default:
+				fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
+			}
+		}
+		return &m
+	case yaml.MappingNode:
+		m := make([]*Schema, 0)
+		s := NewSchemaFromObject(v)
+		m = append(m, s)
+		return &m
+	default:
+		fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets a Schema or an array of Schemas from a yaml.Node if possible.
+func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
+	switch v.Kind {
+	case yaml.SequenceNode:
+		m := make([]*Schema, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.MappingNode:
+				s := NewSchemaFromObject(v2)
+				m = append(m, s)
+			default:
+				fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
+			}
+		}
+		return &SchemaOrSchemaArray{SchemaArray: &m}
+	case yaml.MappingNode:
+		s := NewSchemaFromObject(v)
+		return &SchemaOrSchemaArray{Schema: s}
+	default:
+		fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets an array of strings from a yaml.Node if possible.
+func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		a := []string{v.Value}
+		return &a
+	case yaml.SequenceNode:
+		a := make([]string, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.ScalarNode:
+				a = append(a, v2.Value)
+			default:
+				fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
+			}
+		}
+		return &a
+	default:
+		fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets a string or an array of strings from a yaml.Node if possible.
+func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
+	switch v.Kind {
+	case yaml.ScalarNode:
+		s := &StringOrStringArray{}
+		s.String = &v.Value
+		return s
+	case yaml.SequenceNode:
+		a := make([]string, 0)
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.ScalarNode:
+				a = append(a, v2.Value)
+			default:
+				fmt.Printf("stringOrStringArrayValue: unexpected node %+v\n", v2)
+			}
+		}
+		s := &StringOrStringArray{}
+		s.StringArray = &a
+		return s
+	default:
+		fmt.Printf("stringOrStringArrayValue: unexpected node %+v\n", v)
+	}
+	return nil
+}
+
+// Gets an array of enum values from a yaml.Node if possible.
+func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
+	a := make([]SchemaEnumValue, 0)
+	switch v.Kind {
+	case yaml.SequenceNode:
+		for _, v2 := range v.Content {
+			switch v2.Kind {
+			case yaml.ScalarNode:
+				switch v2.Tag {
+				case "!!str":
+					a = append(a, SchemaEnumValue{String: &v2.Value})
+				case "!!bool":
+					v3, _ := strconv.ParseBool(v2.Value)
+					a = append(a, SchemaEnumValue{Bool: &v3})
+				default:
+					fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
+				}
+			default:
+				fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
+			}
+		}
+	default:
+		fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
+	}
+	return &a
+}
+
+// Gets a map of schemas or string arrays from a yaml.Node if possible.
+func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
+	m := make([]*NamedSchemaOrStringArray, 0)
+	switch v.Kind {
+	case yaml.MappingNode:
+		for i := 0; i < len(v.Content); i += 2 {
+			k2 := v.Content[i].Value
+			v2 := v.Content[i+1]
+			switch v2.Kind {
+			case yaml.SequenceNode:
+				a := make([]string, 0)
+				for _, v3 := range v2.Content {
+					switch v3.Kind {
+					case yaml.ScalarNode:
+						a = append(a, v3.Value)
+					default:
+						fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
+					}
+				}
+				s := &SchemaOrStringArray{}
+				s.StringArray = &a
+				pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
+				m = append(m, pair)
+			default:
+				fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
+			}
+		}
+	default:
+		fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
+	}
+	return &m
+}
+
+// Gets a schema or a boolean value from a yaml.Node if possible.
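+// (A scalar such as "true" or "false" yields the Boolean form; a mapping
+// yields the Schema form.)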
+func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { + schemaOrBoolean := &SchemaOrBoolean{} + switch v.Kind { + case yaml.ScalarNode: + v2, _ := strconv.ParseBool(v.Value) + schemaOrBoolean.Boolean = &v2 + case yaml.MappingNode: + schemaOrBoolean.Schema = NewSchemaFromObject(v) + default: + fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) + } + return schemaOrBoolean +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json new file mode 100644 index 00000000000..85eb502a680 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json @@ -0,0 +1,150 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uri" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { 
"$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go new file mode 100644 index 00000000000..340dc5f9330 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go @@ -0,0 +1,369 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +const indentation = " " + +func renderMappingNode(node *yaml.Node, indent string) (result string) { + result = "{\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i += 2 { + // first print the key + key := node.Content[i].Value + result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) + // then the value + value := node.Content[i+1] + switch value.Kind { + case yaml.ScalarNode: + result += "\"" + value.Value + "\"" + case yaml.MappingNode: + result += renderMappingNode(value, innerIndent) + case yaml.SequenceNode: + result += renderSequenceNode(value, innerIndent) + default: + result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) + } + if i < len(node.Content)-2 { + result += "," + } + result += "\n" + } + + result += indent + "}" + return result +} + +func renderSequenceNode(node *yaml.Node, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i++ { + item := node.Content[i] + switch item.Kind { + case yaml.ScalarNode: + result += innerIndent + "\"" + item.Value + "\"" + case yaml.MappingNode: + result += innerIndent + renderMappingNode(item, innerIndent) + "" + default: + result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) + } + if i < len(node.Content)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +func renderStringArray(array []string, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i, item := range array { + result += innerIndent + "\"" + item + "\"" + if i < len(array)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +// Render renders a yaml.Node as JSON +func Render(node *yaml.Node) string { + if node.Kind == yaml.DocumentNode { + if len(node.Content) == 1 { + return Render(node.Content[0]) + } + } else if node.Kind == yaml.MappingNode { + return renderMappingNode(node, "") + "\n" + } else if node.Kind == yaml.SequenceNode { + return renderSequenceNode(node, "") + "\n" + } + return "" +} + +func (object *SchemaNumber) nodeValue() *yaml.Node { + if object.Integer != nil { + return nodeForInt64(*object.Integer) + } else if object.Float != nil { + return nodeForFloat64(*object.Float) + } else { + 
return nil + } +} + +func (object *SchemaOrBoolean) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.Boolean != nil { + return nodeForBoolean(*object.Boolean) + } else { + return nil + } +} + +func nodeForStringArray(array []string) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, nodeForString(item)) + } + return nodeForSequence(content) +} + +func nodeForSchemaArray(array []*Schema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func (object *StringOrStringArray) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrStringArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.SchemaArray != nil { + return nodeForSchemaArray(*(object.SchemaArray)) + } else { + return nil + } +} + +func (object *SchemaEnumValue) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.Bool != nil { + return nodeForBoolean(*object.Bool) + } else { + return nil + } +} + +func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range *array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func nodeForMapping(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: content, + } +} + +func nodeForSequence(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.SequenceNode, + Content: content, + } +} + +func nodeForString(value string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: value, + } +} + +func nodeForBoolean(value bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", value), + } +} + +func nodeForInt64(value int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", value), + } +} + +func nodeForFloat64(value float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%f", value), + } +} + +func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { + nodes = append(nodes, nodeForString(name)) + nodes = append(nodes, value) + return nodes +} + +func (schema *Schema) nodeValue() *yaml.Node { + n := &yaml.Node{Kind: yaml.MappingNode} + content := make([]*yaml.Node, 0) + if 
schema.Title != nil { + content = appendPair(content, "title", nodeForString(*schema.Title)) + } + if schema.ID != nil { + content = appendPair(content, "id", nodeForString(*schema.ID)) + } + if schema.Schema != nil { + content = appendPair(content, "$schema", nodeForString(*schema.Schema)) + } + if schema.Type != nil { + content = appendPair(content, "type", schema.Type.nodeValue()) + } + if schema.Items != nil { + content = appendPair(content, "items", schema.Items.nodeValue()) + } + if schema.Description != nil { + content = appendPair(content, "description", nodeForString(*schema.Description)) + } + if schema.Required != nil { + content = appendPair(content, "required", nodeForStringArray(*schema.Required)) + } + if schema.AdditionalProperties != nil { + content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) + } + if schema.PatternProperties != nil { + content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) + } + if schema.Properties != nil { + content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) + } + if schema.Dependencies != nil { + content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) + } + if schema.Ref != nil { + content = appendPair(content, "$ref", nodeForString(*schema.Ref)) + } + if schema.MultipleOf != nil { + content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) + } + if schema.Maximum != nil { + content = appendPair(content, "maximum", schema.Maximum.nodeValue()) + } + if schema.ExclusiveMaximum != nil { + content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + content = appendPair(content, "minimum", schema.Minimum.nodeValue()) + } + if schema.ExclusiveMinimum != nil { + content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) + } + if schema.MinLength != nil { + content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) + } + if schema.Pattern != nil { + content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) + } + if schema.AdditionalItems != nil { + content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) + } + if schema.MaxItems != nil { + content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) + } + if schema.MinItems != nil { + content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) + } + if schema.UniqueItems != nil { + content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) + } + if schema.MaxProperties != nil { + content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) + } + if schema.MinProperties != nil { + content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) + } + if schema.Enumeration != nil { + content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) + } + if schema.AllOf != nil { + content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) + } + if schema.AnyOf != nil { + content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) + } + if schema.OneOf != nil { + content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) + } + if schema.Not != nil { + content = appendPair(content, "not", schema.Not.nodeValue()) 
+ } + if schema.Definitions != nil { + content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) + } + if schema.Default != nil { + // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) + } + if schema.Format != nil { + content = appendPair(content, "format", nodeForString(*schema.Format)) + } + n.Content = content + return n +} + +// JSONString returns a json representation of a schema. +func (schema *Schema) JSONString() string { + node := schema.nodeValue() + return Render(node) +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go new file mode 100644 index 00000000000..727d7f4ad52 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go @@ -0,0 +1,8818 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v3" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := compiler.BoolForScalarNode(in) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes := compiler.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
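Stepping back to the writer added above: its public surface is `Render` (a `yaml.Node` rendered as JSON text) and `Schema.JSONString`. A hedged usage sketch, assuming only the vendored import path shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/jsonschema"
	"gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	src := "type: object\nrequired:\n  - name\n"
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	// Render accepts the DocumentNode directly and unwraps it.
	fmt.Print(jsonschema.Render(&doc))
}
```

Note that `renderMappingNode` wraps every scalar in quotes, so numeric and boolean scalars come out as JSON strings; that is sufficient for rendering the draft-04 meta-schema this package carries.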
+func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, 
err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
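Every generated parser in this file ends with the same vendor-extension loop: yaml.v3 stores a mapping as a flat `Content` slice of alternating key and value nodes, so the loop steps by two and keeps the keys prefixed with `x-`. A self-contained sketch of that walk (the helper name is illustrative, not from the vendored code); `NewBodyParameter` below repeats the same pattern:

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

// collectExtensions mirrors the generated vendor-extension loop:
// walk key/value pairs of a MappingNode and keep the "x-" keys.
func collectExtensions(m *yaml.Node) map[string]*yaml.Node {
	out := map[string]*yaml.Node{}
	for i := 0; i+1 < len(m.Content); i += 2 {
		k, v := m.Content[i], m.Content[i+1]
		if k.Kind == yaml.ScalarNode && strings.HasPrefix(k.Value, "x-") {
			out[k.Value] = v
		}
	}
	return out
}

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("type: apiKey\nx-internal: true"), &doc); err != nil {
		panic(err)
	}
	for k := range collectExtensions(doc.Content[0]) {
		fmt.Println(k) // x-internal
	}
}
```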
+func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = 
append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
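The `NewDocument` parser that follows applies the same validation recipe as the smaller parsers above: report required keys that are absent, then reject keys that are neither in the allowed list nor matched by the `^x-` extension pattern. A standalone sketch of that recipe (the helper is illustrative; the vendored code uses `compiler.MissingKeysInMap` and `compiler.InvalidKeysInMap`):

```go
package main

import (
	"fmt"
	"regexp"
	"sort"
)

var patternX = regexp.MustCompile("^x-")

// validateKeys reports missing required keys and keys that are neither
// allowed nor vendor extensions, mirroring the generated parsers.
func validateKeys(present map[string]bool, required, allowed []string) (missing, invalid []string) {
	for _, k := range required {
		if !present[k] {
			missing = append(missing, k)
		}
	}
	allowedSet := map[string]bool{}
	for _, k := range allowed {
		allowedSet[k] = true
	}
	for k := range present {
		if !allowedSet[k] && !patternX.MatchString(k) {
			invalid = append(invalid, k)
		}
	}
	sort.Strings(missing)
	sort.Strings(invalid)
	return
}

func main() {
	doc := map[string]bool{"swagger": true, "info": true, "x-audit": true, "hosts": true}
	missing, invalid := validateKeys(doc,
		[]string{"info", "paths", "swagger"},
		[]string{"basePath", "host", "info", "paths", "swagger"})
	fmt.Println(missing, invalid) // [paths] [hosts]
}
```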
+func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := compiler.SequenceNodeForNode(v5) + if ok { + x.Schemes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := compiler.SequenceNodeForNode(v6) + if ok { + x.Consumes = compiler.StringArrayForSequenceNode(v) + } else { + message := 
fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := compiler.SequenceNodeForNode(v7) + if ok { + x.Produces = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := compiler.SequenceNodeForNode(v14) + if ok { + for _, item := range a.Content { + y, err := NewTag(item, compiler.NewContext("tags", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + 
result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := 
m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := compiler.SequenceNodeForNode(v5) + if ok { + x.Required = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has 
unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
+func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := 
compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, 
"pattern") + if v17 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v21) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. 
+func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for 
maximum: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := compiler.IntForScalarNode(v10) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := compiler.IntForScalarNode(v13) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v16) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := 
compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = compiler.StringForScalarNode(v18) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. +func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != 
nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", 
compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v16) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := 
compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. +func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
string terms_of_service = 4;
+	v4 := compiler.MapValueForKey(m, "termsOfService")
+	if v4 != nil {
+		x.TermsOfService, ok = compiler.StringForScalarNode(v4)
+		if !ok {
+			message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+	}
+	// Contact contact = 5;
+	v5 := compiler.MapValueForKey(m, "contact")
+	if v5 != nil {
+		var err error
+		x.Contact, err = NewContact(v5, compiler.NewContext("contact", v5, context))
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	// License license = 6;
+	v6 := compiler.MapValueForKey(m, "license")
+	if v6 != nil {
+		var err error
+		x.License, err = NewLicense(v6, compiler.NewContext("license", v6, context))
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	// repeated NamedAny vendor_extension = 7;
+	// MAP: Any ^x-
+	x.VendorExtension = make([]*NamedAny, 0)
+	for i := 0; i < len(m.Content); i += 2 {
+		k, ok := compiler.StringForScalarNode(m.Content[i])
+		if ok {
+			v := m.Content[i+1]
+			if strings.HasPrefix(k, "x-") {
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes := compiler.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.VendorExtension = append(x.VendorExtension, pair)
+			}
+		}
+	}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
+func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) {
+	errors := make([]error, 0)
+	x := &ItemsItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		x.Schema = make([]*Schema, 0)
+		y, err := NewSchema(m, compiler.NewContext("", m, context))
+		if err != nil {
+			return nil, err
+		}
+		x.Schema = append(x.Schema, y)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
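The generated constructors above all follow the same shape: unpack the YAML mapping, check required and allowed keys, convert each field, and collect every problem into one error group. For orientation, here is a minimal sketch of driving `NewInfo` directly over a YAML fragment; the import paths (`github.com/google/gnostic/compiler`, `github.com/google/gnostic/openapiv2`) are assumptions and vary across gnostic versions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/gnostic/compiler"             // assumed import path
	openapi_v2 "github.com/google/gnostic/openapiv2" // assumed import path
	yaml "gopkg.in/yaml.v3"
)

func main() {
	src := []byte("title: Sample API\nversion: \"1.0\"\ndescription: a minimal info block\n")

	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		log.Fatal(err)
	}
	// yaml.Unmarshal yields a document node; the mapping itself is Content[0].
	root := doc.Content[0]

	// NewInfo enforces the required keys (title, version), rejects unknown
	// keys that don't match the ^x- extension pattern, and returns all
	// accumulated problems as a single error group.
	info, err := openapi_v2.NewInfo(root, compiler.NewContext("info", root, nil))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.Title, info.Version)
}
```

+
+// NewJsonReference creates an object of type JsonReference if possible, returning an error if not.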
+func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. +func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = 
string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. +func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
+func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) {
+	errors := make([]error, 0)
+	x := &NonBodyParameter{}
+	matched := false
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// HeaderParameterSubSchema header_parameter_sub_schema = 1;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", m, context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", m, context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// QueryParameterSubSchema query_parameter_sub_schema = 3;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", m, context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// PathParameterSubSchema path_parameter_sub_schema = 4;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", m, context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := "contains an invalid NonBodyParameter"
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
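The oneof matcher above simply tries each sub-schema in turn, keeps whichever parses cleanly, and discards the speculative errors once any branch matched. Callers then recover the concrete parameter kind with a type switch over the generated wrapper types. A minimal sketch, reusing the assumed imports from the `NewInfo` example above:

```go
// Sketch: resolving the oneof produced by NewNonBodyParameter.
src := []byte("name: X-Rate-Limit\nin: header\ntype: integer\n")

var doc yaml.Node
if err := yaml.Unmarshal(src, &doc); err != nil {
	log.Fatal(err)
}
node := doc.Content[0]

param, err := openapi_v2.NewNonBodyParameter(node, compiler.NewContext("parameter", node, nil))
if err != nil {
	log.Fatal(err)
}
// Exactly one wrapper is populated when a sub-schema matched; here the
// "in: header" enum check means only the header branch can succeed.
switch p := param.Oneof.(type) {
case *openapi_v2.NonBodyParameter_HeaderParameterSubSchema:
	fmt.Println("header parameter:", p.HeaderParameterSubSchema.Name)
case *openapi_v2.NonBodyParameter_QueryParameterSubSchema:
	fmt.Println("query parameter:", p.QueryParameterSubSchema.Name)
case *openapi_v2.NonBodyParameter_FormDataParameterSubSchema:
	fmt.Println("formData parameter:", p.FormDataParameterSubSchema.Name)
case *openapi_v2.NonBodyParameter_PathParameterSubSchema:
	fmt.Println("path parameter:", p.PathParameterSubSchema.Name)
}
```

+
+// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not.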
+func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := 
compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. 
+func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if 
handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedString{} + pair.Name = k + pair.Value, _ = compiler.StringForScalarNode(v) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. +func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Tags = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, 
compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := compiler.SequenceNodeForNode(v6) + if ok { + x.Produces = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := compiler.SequenceNodeForNode(v7) + if ok { + x.Consumes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := compiler.SequenceNodeForNode(v8) + if ok { + for _, item := range a.Content { + y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := compiler.SequenceNodeForNode(v10) + if ok { + x.Schemes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } 
+ x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. +func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", m, context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", m, context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid Parameter") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
+func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) {
+	errors := make([]error, 0)
+	x := &ParametersItem{}
+	matched := false
+	// Parameter parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_Parameter{Parameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid ParametersItem")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathItem creates an object of type PathItem if possible, returning an error if not.
+func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) {
+	errors := make([]error, 0)
+	x := &PathItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Operation get = 2;
+		v2 := compiler.MapValueForKey(m, "get")
+		if v2 != nil {
+			var err error
+			x.Get, err = NewOperation(v2, compiler.NewContext("get", v2, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation put = 3;
+		v3 := compiler.MapValueForKey(m, "put")
+		if v3 != nil {
+			var err error
+			x.Put, err = NewOperation(v3, compiler.NewContext("put", v3, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation post = 4;
+		v4 := compiler.MapValueForKey(m, "post")
+		if v4 != nil {
+			var err error
+			x.Post, err = NewOperation(v4, compiler.NewContext("post", v4, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation delete = 5;
+		v5 := compiler.MapValueForKey(m, "delete")
+		if v5 != nil {
+			var err error
+			x.Delete, err = NewOperation(v5, compiler.NewContext("delete", v5, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation options = 6;
+		v6 := compiler.MapValueForKey(m, "options")
+		if v6 != nil {
+			var err error
+			x.Options, err = NewOperation(v6, compiler.NewContext("options", v6, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation head = 7;
+		v7 := compiler.MapValueForKey(m, "head")
+		if v7 != nil {
+			var err error
+			x.Head, err = NewOperation(v7, compiler.NewContext("head", v7, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation patch = 8;
+		v8 := compiler.MapValueForKey(m, "patch")
+		if v8 != nil {
+			var err error
+			x.Patch, err = NewOperation(v8, compiler.NewContext("patch", v8, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated ParametersItem parameters = 9;
+		v9 := compiler.MapValueForKey(m, "parameters")
+		if v9 != nil {
+			// repeated ParametersItem
+			x.Parameters = make([]*ParametersItem, 0)
+			a, ok := compiler.SequenceNodeForNode(v9)
+			if ok {
+				for _, item := range a.Content {
+					y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Parameters = append(x.Parameters, y)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 10;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
+func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = 
compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v16) + if !ok { + 
message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. 
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) {
+	errors := make([]error, 0)
+	x := &Paths{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern0, pattern1}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedAny vendor_extension = 1;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+		// repeated NamedPathItem path = 2;
+		// MAP: PathItem ^/
+		x.Path = make([]*NamedPathItem, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "/") {
+					pair := &NamedPathItem{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Path = append(x.Path, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
+func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has 
unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := compiler.IntForScalarNode(v10) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := compiler.IntForScalarNode(v13) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v16) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil 
{ + x.Format, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = 
compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v21) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
+func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) {
+	errors := make([]error, 0)
+	x := &Response{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"description"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "examples", "headers", "schema"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// SchemaItem schema = 2;
+		v2 := compiler.MapValueForKey(m, "schema")
+		if v2 != nil {
+			var err error
+			x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", v2, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Headers headers = 3;
+		v3 := compiler.MapValueForKey(m, "headers")
+		if v3 != nil {
+			var err error
+			x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", v3, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Examples examples = 4;
+		v4 := compiler.MapValueForKey(m, "examples")
+		if v4 != nil {
+			var err error
+			x.Examples, err = NewExamples(v4, compiler.NewContext("examples", v4, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 5;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
+func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ResponseDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedResponse additional_properties = 1;
+		// MAP: Response
+		x.AdditionalProperties = make([]*NamedResponse, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedResponse{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewResponse(v, compiler.NewContext(k, v, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
+func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) {
+	errors := make([]error, 0)
+	x := &ResponseValue{}
+	matched := false
+	// Response response = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewResponse(m, compiler.NewContext("response", m, context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_Response{Response: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid ResponseValue")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponses creates an object of type Responses if possible, returning an error if not.
+func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) {
+	errors := make([]error, 0)
+	x := &Responses{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern2, pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedResponseValue response_code = 1;
+		// MAP: ResponseValue ^([0-9]{3})$|^(default)$
+		x.ResponseCode = make([]*NamedResponseValue, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if pattern2.MatchString(k) {
+					pair := &NamedResponseValue{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewResponseValue(v, compiler.NewContext(k, v, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.ResponseCode = append(x.ResponseCode, pair)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 2;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchema creates an object of type Schema if possible, returning an error if not.
+func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) {
+	errors := make([]error, 0)
+	x := &Schema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = compiler.StringForScalarNode(v2)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string title = 3;
+		v3 := compiler.MapValueForKey(m, "title")
+		if v3 != nil {
+			x.Title, ok = compiler.StringForScalarNode(v3)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 4;
+		v4 := compiler.MapValueForKey(m, "description")
+		if v4 != nil {
+			x.Description, ok = compiler.StringForScalarNode(v4)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float multiple_of = 6;
+		v6 := compiler.MapValueForKey(m, "multipleOf")
+		if v6 != nil {
+			v, ok := compiler.FloatForScalarNode(v6)
+			if ok {
+				x.MultipleOf = v
+			} else {
+				message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float maximum = 7;
+		v7 := compiler.MapValueForKey(m, "maximum")
+		if v7 != nil {
+			v, ok := compiler.FloatForScalarNode(v7)
+			if ok {
+				x.Maximum = v
+			} else {
+				message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 8;
+		v8 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v8 != nil {
+			x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 9;
+		v9 := compiler.MapValueForKey(m, "minimum")
+		if v9 != nil {
+			v, ok := compiler.FloatForScalarNode(v9)
+			if ok {
+				x.Minimum = v
+			} else {
+				message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 10;
+		v10 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v10 != nil {
+			x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 11;
+		v11 := compiler.MapValueForKey(m, "maxLength")
+		if v11 != nil {
+			t, ok := compiler.IntForScalarNode(v11)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 12;
+		v12 := compiler.MapValueForKey(m, "minLength")
+		if v12 != nil {
+			t, ok := compiler.IntForScalarNode(v12)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 13;
+		v13 := compiler.MapValueForKey(m, "pattern")
+		if v13 != nil {
+			x.Pattern, ok = compiler.StringForScalarNode(v13)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 14;
+		v14 := compiler.MapValueForKey(m, "maxItems")
+		if v14 != nil {
+			t, ok := compiler.IntForScalarNode(v14)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 15;
+		v15 := compiler.MapValueForKey(m, "minItems")
+		if v15 != nil {
+			t, ok := compiler.IntForScalarNode(v15)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 16;
+		v16 := compiler.MapValueForKey(m, "uniqueItems")
+		if v16 != nil {
+			x.UniqueItems, ok = compiler.BoolForScalarNode(v16)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_properties = 17;
+		v17 := compiler.MapValueForKey(m, "maxProperties")
+		if v17 != nil {
+			t, ok := compiler.IntForScalarNode(v17)
+			if ok {
+				x.MaxProperties = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_properties = 18;
+		v18 := compiler.MapValueForKey(m, "minProperties")
+		if v18 != nil {
+			t, ok := compiler.IntForScalarNode(v18)
+			if ok {
+				x.MinProperties = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string required = 19;
+		v19 := compiler.MapValueForKey(m, "required")
+		if v19 != nil {
+			v, ok := compiler.SequenceNodeForNode(v19)
+			if ok {
+				x.Required = compiler.StringArrayForSequenceNode(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := compiler.SequenceNodeForNode(v20)
+			if ok {
+				for _, item := range a.Content {
+					y, err := NewAny(item, compiler.NewContext("enum", item, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// AdditionalPropertiesItem additional_properties = 21;
+		v21 := compiler.MapValueForKey(m, "additionalProperties")
+		if v21 != nil {
+			var err error
+			x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", v21, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// TypeItem type = 22;
+		v22 := compiler.MapValueForKey(m, "type")
+		if v22 != nil {
+			var err error
+			x.Type, err = NewTypeItem(v22, compiler.NewContext("type", v22, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ItemsItem items = 23;
+		v23 := compiler.MapValueForKey(m, "items")
+		if v23 != nil {
+			var err error
+			x.Items, err = NewItemsItem(v23, compiler.NewContext("items", v23, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated Schema all_of = 24;
+		v24 := compiler.MapValueForKey(m, "allOf")
+		if v24 != nil {
+			// repeated Schema
+			x.AllOf = make([]*Schema, 0)
+			a, ok := compiler.SequenceNodeForNode(v24)
+			if ok {
+				for _, item := range a.Content {
+					y, err := NewSchema(item, compiler.NewContext("allOf", item, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.AllOf = append(x.AllOf, y)
+				}
+			}
+		}
+		// Properties properties = 25;
+		v25 := compiler.MapValueForKey(m, "properties")
+		if v25 != nil {
+			var err error
+			x.Properties, err = NewProperties(v25, compiler.NewContext("properties", v25, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string discriminator = 26;
+		v26 := compiler.MapValueForKey(m, "discriminator")
+		if v26 != nil {
+			x.Discriminator, ok = compiler.StringForScalarNode(v26)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool read_only = 27;
+		v27 := compiler.MapValueForKey(m, "readOnly")
+		if v27 != nil {
+			x.ReadOnly, ok = compiler.BoolForScalarNode(v27)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Xml xml = 28;
+		v28 := compiler.MapValueForKey(m, "xml")
+		if v28 != nil {
+			var err error
+			x.Xml, err = NewXml(v28, compiler.NewContext("xml", v28, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ExternalDocs external_docs = 29;
+		v29 := compiler.MapValueForKey(m, "externalDocs")
+		if v29 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", v29, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Any example = 30;
+		v30 := compiler.MapValueForKey(m, "example")
+		if v30 != nil {
+			var err error
+			x.Example, err = NewAny(v30, compiler.NewContext("example", v30, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 31;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not.
+func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) {
+	errors := make([]error, 0)
+	x := &SchemaItem{}
+	matched := false
+	// Schema schema = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context))
+			if matchingError == nil {
+				x.Oneof = &SchemaItem_Schema{Schema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// FileSchema file_schema = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", m, context))
+			if matchingError == nil {
+				x.Oneof = &SchemaItem_FileSchema{FileSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid SchemaItem")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not.
+func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSecurityDefinitionsItem additional_properties = 1;
+		// MAP: SecurityDefinitionsItem
+		x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedSecurityDefinitionsItem{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, v, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not.
+func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitionsItem{}
+	matched := false
+	// BasicAuthenticationSecurity basic_authentication_security = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// ApiKeySecurity api_key_security = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2PasswordSecurity oauth2_password_security = 4;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ApplicationSecurity oauth2_application_security = 5;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
+func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) {
+	errors := make([]error, 0)
+	x := &SecurityRequirement{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedStringArray additional_properties = 1;
+		// MAP: StringArray
+		x.AdditionalProperties = make([]*NamedStringArray, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedStringArray{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewStringArray creates an object of type StringArray if possible, returning an error if not.
+func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) {
+	errors := make([]error, 0)
+	x := &StringArray{}
+	x.Value = make([]string, 0)
+	for _, node := range in.Content {
+		s, _ := compiler.StringForScalarNode(node)
+		x.Value = append(x.Value, s)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTag creates an object of type Tag if possible, returning an error if not.
+func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) {
+	errors := make([]error, 0)
+	x := &Tag{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "externalDocs", "name"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = compiler.StringForScalarNode(v2)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 3;
+		v3 := compiler.MapValueForKey(m, "externalDocs")
+		if v3 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
+func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) {
+	errors := make([]error, 0)
+	x := &TypeItem{}
+	v1 := in
+	switch v1.Kind {
+	case yaml.ScalarNode:
+		x.Value = make([]string, 0)
+		x.Value = append(x.Value, v1.Value)
+	case yaml.SequenceNode:
+		x.Value = make([]string, 0)
+		for _, v := range v1.Content {
+			value := v.Value
+			ok := v.Kind == yaml.ScalarNode
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	default:
+		message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
+func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) {
+	errors := make([]error, 0)
+	x := &VendorExtension{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes := compiler.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewXml creates an object of type Xml if possible, returning an error if not.
+func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) {
+	errors := make([]error, 0)
+	x := &Xml{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string namespace = 2;
+		v2 := compiler.MapValueForKey(m, "namespace")
+		if v2 != nil {
+			x.Namespace, ok = compiler.StringForScalarNode(v2)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string prefix = 3;
+		v3 := compiler.MapValueForKey(m, "prefix")
+		if v3 != nil {
+			x.Prefix, ok = compiler.StringForScalarNode(v3)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool attribute = 4;
+		v4 := compiler.MapValueForKey(m, "attribute")
+		if v4 != nil {
+			x.Attribute, ok = compiler.BoolForScalarNode(v4)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool wrapped = 5;
+		v5 := compiler.MapValueForKey(m, "wrapped")
+		if v5 != nil {
+			x.Wrapped, ok = compiler.BoolForScalarNode(v5)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside AdditionalPropertiesItem objects.
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Any objects.
+func (m *Any) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ApiKeySecurity objects.
+func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects.
+func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BodyParameter objects.
+func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Contact objects.
+func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Default objects.
+func (m *Default) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Definitions objects.
+func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Document objects.
+func (m *Document) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Info != nil {
+		_, err := m.Info.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Paths != nil {
+		_, err := m.Paths.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Definitions != nil {
+		_, err := m.Definitions.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Parameters != nil {
+		_, err := m.Parameters.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.SecurityDefinitions != nil {
+		_, err := m.SecurityDefinitions.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Tags {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Examples objects.
+func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ExternalDocs objects.
+func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FileSchema objects.
+func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FormDataParameterSubSchema objects.
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Header objects.
+func (m *Header) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside HeaderParameterSubSchema objects.
+func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Headers objects.
+func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Info objects.
+func (m *Info) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Contact != nil {
+		_, err := m.Contact.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.License != nil {
+		_, err := m.License.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ItemsItem objects.
+func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.Schema {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside JsonReference objects.
+func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewJsonReference(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside License objects.
+func (m *License) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedAny objects.
+func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedHeader objects.
+func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedParameter objects.
+func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedPathItem objects.
+func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponse objects.
+func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponseValue objects.
+func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSchema objects.
+func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects.
+func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedString objects.
+func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedStringArray objects.
+func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NonBodyParameter objects.
+func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema)
+		if ok {
+			_, err := p.HeaderParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema)
+		if ok {
+			_, err := p.FormDataParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema)
+		if ok {
+			_, err := p.QueryParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema)
+		if ok {
+			_, err := p.PathParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects.
+func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects.
+func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects.
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
+func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2Scopes objects.
+func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Operation objects.
+func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Parameter objects.
+func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*Parameter_BodyParameter)
+		if ok {
+			_, err := p.BodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*Parameter_NonBodyParameter)
+		if ok {
+			_, err := p.NonBodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParameterDefinitions objects.
+func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParametersItem objects.
+func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ParametersItem_Parameter)
+		if ok {
+			_, err := p.Parameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ParametersItem_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewParametersItem(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathItem objects.
+func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewPathItem(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Get != nil {
+		_, err := m.Get.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Put != nil {
+		_, err := m.Put.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Post != nil {
+		_, err := m.Post.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Delete != nil {
+		_, err := m.Delete.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Options != nil {
+		_, err := m.Options.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Head != nil {
+		_, err := m.Head.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Patch != nil {
+		_, err := m.Patch.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathParameterSubSchema objects.
+func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Paths objects.
+func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.Path {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PrimitivesItems objects.
+func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Properties objects.
+func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside QueryParameterSubSchema objects.
+func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Response objects.
+func (m *Response) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Headers != nil {
+		_, err := m.Headers.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Examples != nil {
+		_, err := m.Examples.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseDefinitions objects.
+func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseValue objects.
+func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ResponseValue_Response)
+		if ok {
+			_, err := p.Response.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ResponseValue_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewResponseValue(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Responses objects.
+func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.ResponseCode {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Schema objects.
+func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewSchema(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.AdditionalProperties != nil {
+		_, err := m.AdditionalProperties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Type != nil {
+		_, err := m.Type.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.AllOf {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Properties != nil {
+		_, err := m.Properties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Xml != nil {
+		_, err := m.Xml.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SchemaItem objects.
+func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SchemaItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SchemaItem_FileSchema)
+		if ok {
+			_, err := p.FileSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitions objects.
+func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
+		if ok {
+			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
+		if ok {
+			_, err := p.ApiKeySecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
+		if ok {
+			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
+		if ok {
+			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
+		if ok {
+			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
+		if ok {
+			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityRequirement objects.
+func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside StringArray objects.
+func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Tag objects.
+func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside TypeItem objects.
+func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside VendorExtension objects.
+func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Xml objects.
+func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
+func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node {
+	// ONE OF WRAPPER
+	// AdditionalPropertiesItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+		return compiler.NewScalarNodeForBool(v1.Boolean)
+	}
+	return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of Any suitable for JSON or YAML export.
+func (m *Any) ToRawInfo() *yaml.Node {
+	var err error
+	var node yaml.Node
+	err = yaml.Unmarshal([]byte(m.Yaml), &node)
+	if err == nil {
+		if node.Kind == yaml.DocumentNode {
+			return node.Content[0]
+		}
+		return &node
+	}
+	return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
+func (m *ApiKeySecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
+func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
+func (m *BodyParameter) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
+	if m.Required != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+	info.Content = append(info.Content, m.Schema.ToRawInfo())
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
+func (m *Contact) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.Url != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
+	}
+	if m.Email != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("email"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Default suitable for JSON or YAML export.
+func (m *Default) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
+func (m *Definitions) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Document suitable for JSON or YAML export.
+func (m *Document) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("info"))
+	info.Content = append(info.Content, m.Info.ToRawInfo())
+	if m.Host != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("host"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host))
+	}
+	if m.BasePath != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath))
+	}
+	if len(m.Schemes) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
+	}
+	if len(m.Consumes) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
+	}
+	if len(m.Produces) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("paths"))
+	info.Content = append(info.Content, m.Paths.ToRawInfo())
+	if m.Definitions != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions"))
+		info.Content = append(info.Content, m.Definitions.ToRawInfo())
+	}
+	if m.Parameters != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
+		info.Content = append(info.Content, m.Parameters.ToRawInfo())
+	}
+	if m.Responses != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("responses"))
+		info.Content = append(info.Content, m.Responses.ToRawInfo())
+	}
+	if len(m.Security) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Security {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("security"))
+		info.Content = append(info.Content, items)
+	}
+	if m.SecurityDefinitions != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions"))
+		info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo())
+	}
+	if len(m.Tags) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Tags {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("tags"))
+		info.Content = append(info.Content, items)
+	}
+	if m.ExternalDocs != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
+func (m *Examples) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
+func (m *ExternalDocs) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
+func (m *FileSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Title != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if len(m.Required) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	if m.ReadOnly != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
+	}
+	if m.ExternalDocs != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
+	}
+	if m.Example != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+		info.Content = append(info.Content, m.Example.ToRawInfo())
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
+func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Required != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
+	}
+	if m.In != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.AllowEmptyValue != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
+	}
+	if m.Type != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	}
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Items != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
+	}
+	if m.CollectionFormat != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if m.Maximum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+	}
+	if m.ExclusiveMaximum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+	}
+	if m.Minimum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+	}
+	if m.ExclusiveMinimum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+	}
+	if m.MaxLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+	}
+	if m.MinLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+	}
+	if m.Pattern != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+	}
+	if m.MaxItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Header suitable for JSON or YAML export.
+func (m *Header) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Items != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
+	}
+	if m.CollectionFormat != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if m.Maximum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+	}
+	if m.ExclusiveMaximum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+	}
+	if m.Minimum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+	}
+	if m.ExclusiveMinimum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+	}
+	if m.MaxLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+	}
+	if m.MinLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+	}
+	if m.Pattern != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+	}
+	if m.MaxItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
+func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Required != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
+	}
+	if m.In != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.Type != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	}
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Items != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
+	}
+	if m.CollectionFormat != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if m.Maximum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+	}
+	if m.ExclusiveMaximum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+	}
+	if m.Minimum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+	}
+	if m.ExclusiveMinimum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+	}
+	if m.MaxLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+	}
+	if m.MinLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+	}
+	if m.Pattern != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+	}
+	if m.MaxItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
+func (m *Headers) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Info suitable for JSON or YAML export.
+func (m *Info) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("version"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.TermsOfService != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService))
+	}
+	if m.Contact != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("contact"))
+		info.Content = append(info.Content, m.Contact.ToRawInfo())
+	}
+	if m.License != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("license"))
+		info.Content = append(info.Content, m.License.ToRawInfo())
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
+func (m *ItemsItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if len(m.Schema) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Schema {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+		info.Content = append(info.Content, items)
+	}
+	return info
+}
+
+// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
+func (m *JsonReference) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	return info
+}
+
+// ToRawInfo returns a description of License suitable for JSON or YAML export.
+func (m *License) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	if m.Url != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
+func (m *NamedAny) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.Value != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+		info.Content = append(info.Content, m.Value.ToRawInfo())
+	}
+	return info
+}
+
+// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
+func (m *NamedHeader) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
+func (m *NamedParameter) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
+func (m *NamedPathItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
+func (m *NamedResponse) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
+func (m *NamedResponseValue) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
+func (m *NamedSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
+func (m *NamedString) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.Value != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value))
+	}
+	return info
+}
+
+// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
+func (m *NamedStringArray) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
+func (m *NonBodyParameter) ToRawInfo() *yaml.Node {
+	// ONE OF WRAPPER
+	// NonBodyParameter
+	// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetHeaderParameterSubSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFormDataParameterSubSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetQueryParameterSubSchema()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetPathParameterSubSchema()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
+func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
+	if m.Scopes != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+		info.Content = append(info.Content, m.Scopes.ToRawInfo())
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
+func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
+	if m.Scopes != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+		info.Content = append(info.Content, m.Scopes.ToRawInfo())
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
+func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
+	if m.Scopes != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+		info.Content = append(info.Content, m.Scopes.ToRawInfo())
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
+func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
+	if m.Scopes != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
+		info.Content = append(info.Content, m.Scopes.ToRawInfo())
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
+func (m *Oauth2Scopes) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
+func (m *Operation) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if len(m.Tags) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("tags"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags))
+	}
+	if m.Summary != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("summary"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.ExternalDocs != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
+	}
+	if m.OperationId != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId))
+	}
+	if len(m.Produces) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
+	}
+	if len(m.Consumes) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
+	}
+	if len(m.Parameters) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Parameters {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
+		info.Content = append(info.Content, items)
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("responses"))
+	info.Content = append(info.Content, m.Responses.ToRawInfo())
+	if len(m.Schemes) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
+	}
+	if m.Deprecated != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
+	}
+	if len(m.Security) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Security {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("security"))
+		info.Content = append(info.Content, items)
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
+func (m *Parameter) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. +func (m *ParametersItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Get != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) + } + if m.Put != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) + } + if m.Post != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) + } + if m.Delete != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) + } + if m.Options != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) + } + if m.Head != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) + } + if m.Patch != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, 
compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. +func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + 
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Paths suitable for JSON or YAML export.
+func (m *Paths) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	if m.Path != nil {
+		for _, item := range m.Path {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
+func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Type != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	}
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Items != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
+	}
+	if m.CollectionFormat != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if m.Maximum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+	}
+	if m.ExclusiveMaximum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+	}
+	if m.Minimum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+	}
+	if m.ExclusiveMinimum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+	}
+	if m.MaxLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+	}
+	if m.MinLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+	}
+	if m.Pattern != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+	}
+	if m.MaxItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Properties suitable for JSON or YAML export.
+func (m *Properties) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export.
+func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Required != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
+	}
+	if m.In != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.AllowEmptyValue != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
+	}
+	if m.Type != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+	}
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Items != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, m.Items.ToRawInfo())
+	}
+	if m.CollectionFormat != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if m.Maximum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+	}
+	if m.ExclusiveMaximum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+	}
+	if m.Minimum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+	}
+	if m.ExclusiveMinimum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+	}
+	if m.MaxLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+	}
+	if m.MinLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+	}
+	if m.Pattern != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+	}
+	if m.MaxItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Response suitable for JSON or YAML export.
+func (m *Response) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	if m.Schema != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
+		info.Content = append(info.Content, m.Schema.ToRawInfo())
+	}
+	if m.Headers != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("headers"))
+		info.Content = append(info.Content, m.Headers.ToRawInfo())
+	}
+	if m.Examples != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("examples"))
+		info.Content = append(info.Content, m.Examples.ToRawInfo())
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
+func (m *ResponseDefinitions) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export.
+func (m *ResponseValue) ToRawInfo() *yaml.Node {
+	// ONE OF WRAPPER
+	// ResponseValue
+	// {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetResponse()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetJsonReference()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
+func (m *Responses) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.ResponseCode != nil {
+		for _, item := range m.ResponseCode {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
+func (m *Schema) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.XRef != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
+	}
+	if m.Format != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+	}
+	if m.Title != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
+	}
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.Default != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+		info.Content = append(info.Content, m.Default.ToRawInfo())
+	}
+	if m.MultipleOf != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+	}
+	if m.Maximum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+	}
+	if m.ExclusiveMaximum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+	}
+	if m.Minimum != 0.0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+	}
+	if m.ExclusiveMinimum != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+	}
+	if m.MaxLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+	}
+	if m.MinLength != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+	}
+	if m.Pattern != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+	}
+	if m.MaxItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+	}
+	if m.MinItems != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+	}
+	if m.UniqueItems != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+	}
+	if m.MaxProperties != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties))
+	}
+	if m.MinProperties != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties))
+	}
+	if len(m.Required) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
+	}
+	if len(m.Enum) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Enum {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+		info.Content = append(info.Content, items)
+	}
+	if m.AdditionalProperties != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties"))
+		info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo())
+	}
+	if m.Type != nil {
+		if len(m.Type.Value) == 1 {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0]))
+		} else {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+			info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value))
+		}
+	}
+	if m.Items != nil {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.Items.Schema {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		if len(items.Content) == 1 {
+			items = items.Content[0]
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+		info.Content = append(info.Content, items)
+	}
+	if len(m.AllOf) != 0 {
+		items := compiler.NewSequenceNode()
+		for _, item := range m.AllOf {
+			items.Content = append(items.Content, item.ToRawInfo())
+		}
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf"))
+		info.Content = append(info.Content, items)
+	}
+	if m.Properties != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("properties"))
+		info.Content = append(info.Content, m.Properties.ToRawInfo())
+	}
+	if m.Discriminator != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator))
+	}
+	if m.ReadOnly != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
+	}
+	if m.Xml != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("xml"))
+		info.Content = append(info.Content, m.Xml.ToRawInfo())
+	}
+	if m.ExternalDocs != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
+	}
+	if m.Example != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+		info.Content = append(info.Content, m.Example.ToRawInfo())
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
+func (m *SchemaItem) ToRawInfo() *yaml.Node {
+	// ONE OF WRAPPER
+	// SchemaItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFileSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
+func (m *SecurityDefinitions) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node {
+	// ONE OF WRAPPER
+	// SecurityDefinitionsItem
+	// {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetBasicAuthenticationSecurity()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetApiKeySecurity()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetOauth2ImplicitSecurity()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetOauth2PasswordSecurity()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	// {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v4 := m.GetOauth2ApplicationSecurity()
+	if v4 != nil {
+		return v4.ToRawInfo()
+	}
+	// {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v5 := m.GetOauth2AccessCodeSecurity()
+	if v5 != nil {
+		return v5.ToRawInfo()
+	}
+	return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
+func (m *SecurityRequirement) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of StringArray suitable for JSON or YAML export.
+func (m *StringArray) ToRawInfo() *yaml.Node {
+	return compiler.NewSequenceNodeForStringArray(m.Value)
+}
+
+// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
+func (m *Tag) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	if m.Description != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+	}
+	if m.ExternalDocs != nil {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
+func (m *TypeItem) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if len(m.Value) != 0 {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value))
+	}
+	return info
+}
+
+// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
+func (m *VendorExtension) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
+func (m *Xml) ToRawInfo() *yaml.Node {
+	info := compiler.NewMappingNode()
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
+	}
+	if m.Namespace != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace))
+	}
+	if m.Prefix != "" {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix))
+	}
+	if m.Attribute != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute))
+	}
+	if m.Wrapped != false {
+		info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped"))
+		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped))
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+			info.Content = append(info.Content, item.Value.ToRawInfo())
+		}
+	}
+	return info
+}
+
+var (
+	pattern0 = regexp.MustCompile("^x-")
+	pattern1 = regexp.MustCompile("^/")
+	pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$")
+)
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
new file mode 100644
index 00000000000..8a5f302f337
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
@@ -0,0 +1,7342 @@
+// Copyright 2020 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.15.5
+// source: openapiv2/OpenAPIv2.proto
+
+package openapi_v2
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type AdditionalPropertiesItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	//	*AdditionalPropertiesItem_Schema
+	//	*AdditionalPropertiesItem_Boolean
+	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *AdditionalPropertiesItem) Reset() {
+	*x = AdditionalPropertiesItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AdditionalPropertiesItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AdditionalPropertiesItem) ProtoMessage() {}
+
+func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead.
+func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *AdditionalPropertiesItem) GetSchema() *Schema {
+	if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
+		return x.Schema
+	}
+	return nil
+}
+
+func (x *AdditionalPropertiesItem) GetBoolean() bool {
+	if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+		return x.Boolean
+	}
+	return false
+}
+
+type isAdditionalPropertiesItem_Oneof interface {
+	isAdditionalPropertiesItem_Oneof()
+}
+
+type AdditionalPropertiesItem_Schema struct {
+	Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
+}
+
+type AdditionalPropertiesItem_Boolean struct {
+	Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"`
+}
+
+func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {}
+
+func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
+
+type Any struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
+}
+
+func (x *Any) Reset() {
+	*x = Any{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Any) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Any) ProtoMessage() {}
+
+func (x *Any) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Any.ProtoReflect.Descriptor instead.
+func (*Any) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Any) GetValue() *anypb.Any {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+func (x *Any) GetYaml() string {
+	if x != nil {
+		return x.Yaml
+	}
+	return ""
+}
+
+type ApiKeySecurity struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *ApiKeySecurity) Reset() {
+	*x = ApiKeySecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ApiKeySecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApiKeySecurity) ProtoMessage() {}
+
+func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead.
+func (*ApiKeySecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ApiKeySecurity) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *ApiKeySecurity) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *ApiKeySecurity) GetIn() string {
+	if x != nil {
+		return x.In
+	}
+	return ""
+}
+
+func (x *ApiKeySecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type BasicAuthenticationSecurity struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *BasicAuthenticationSecurity) Reset() {
+	*x = BasicAuthenticationSecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BasicAuthenticationSecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicAuthenticationSecurity) ProtoMessage() {}
+
+func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead.
+func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *BasicAuthenticationSecurity) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *BasicAuthenticationSecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type BodyParameter struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"`
+	// Determines whether or not this parameter is required or optional.
+	Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+	Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *BodyParameter) Reset() {
+	*x = BodyParameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BodyParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BodyParameter) ProtoMessage() {}
+
+func (x *BodyParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead.
+func (*BodyParameter) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *BodyParameter) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *BodyParameter) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *BodyParameter) GetIn() string {
+	if x != nil {
+		return x.In
+	}
+	return ""
+}
+
+func (x *BodyParameter) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *BodyParameter) GetSchema() *Schema {
+	if x != nil {
+		return x.Schema
+	}
+	return nil
+}
+
+func (x *BodyParameter) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+// Contact information for the owners of the API.
+type Contact struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The identifying name of the contact person/organization.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The URL pointing to the contact information.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	// The email address of the contact person/organization.
+	Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Contact) Reset() {
+	*x = Contact{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Contact) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Contact.ProtoReflect.Descriptor instead.
+func (*Contact) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Contact) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Contact) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *Contact) GetEmail() string {
+	if x != nil {
+		return x.Email
+	}
+	return ""
+}
+
+func (x *Contact) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Default struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *Default) Reset() {
+	*x = Default{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Default) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Default) ProtoMessage() {}
+
+func (x *Default) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Default.ProtoReflect.Descriptor instead.
+func (*Default) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Default) GetAdditionalProperties() []*NamedAny {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+// One or more JSON objects describing the schemas being consumed and produced by the API.
+type Definitions struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *Definitions) Reset() {
+	*x = Definitions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Definitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Definitions) ProtoMessage() {}
+
+func (x *Definitions) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Definitions.ProtoReflect.Descriptor instead.
+func (*Definitions) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Definitions) GetAdditionalProperties() []*NamedSchema {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type Document struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The Swagger version of this document.
+	Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+	Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	// The host (name or ip) of the API. Example: 'swagger.io'
+	Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+	// The base path to the API. Example: '/api'.
+	BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+	// The transfer protocol of the API.
+	Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"`
+	// A list of MIME types accepted by the API.
+	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	// A list of MIME types the API can produce.
+	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"`
+	Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"`
+	Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"`
+	Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"`
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Document) Reset() {
+	*x = Document{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Document) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Document) ProtoMessage() {}
+
+func (x *Document) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Document.ProtoReflect.Descriptor instead.
+func (*Document) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Document) GetSwagger() string {
+	if x != nil {
+		return x.Swagger
+	}
+	return ""
+}
+
+func (x *Document) GetInfo() *Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+func (x *Document) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
+}
+
+func (x *Document) GetBasePath() string {
+	if x != nil {
+		return x.BasePath
+	}
+	return ""
+}
+
+func (x *Document) GetSchemes() []string {
+	if x != nil {
+		return x.Schemes
+	}
+	return nil
+}
+
+func (x *Document) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
+	}
+	return nil
+}
+
+func (x *Document) GetProduces() []string {
+	if x != nil {
+		return x.Produces
+	}
+	return nil
+}
+
+func (x *Document) GetPaths() *Paths {
+	if x != nil {
+		return x.Paths
+	}
+	return nil
+}
+
+func (x *Document) GetDefinitions() *Definitions {
+	if x != nil {
+		return x.Definitions
+	}
+	return nil
+}
+
+func (x *Document) GetParameters() *ParameterDefinitions {
+	if x != nil {
+		return x.Parameters
+	}
+	return nil
+}
+
+func (x *Document) GetResponses() *ResponseDefinitions {
+	if x != nil {
+		return x.Responses
+	}
+	return nil
+}
+
+func (x *Document) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *Document) GetSecurityDefinitions() *SecurityDefinitions {
+	if x != nil {
+		return x.SecurityDefinitions
+	}
+	return nil
+}
+
+func (x *Document) GetTags() []*Tag {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
+func (x *Document) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Document) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Examples struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *Examples) Reset() {
+	*x = Examples{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Examples) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Examples) ProtoMessage() {}
+
+func (x *Examples) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Examples.ProtoReflect.Descriptor instead.
+func (*Examples) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Examples) GetAdditionalProperties() []*NamedAny {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+// information about external documentation
+type ExternalDocs struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *ExternalDocs) Reset() {
+	*x = ExternalDocs{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExternalDocs) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocs) ProtoMessage() {}
+
+func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead.
+func (*ExternalDocs) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *ExternalDocs) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *ExternalDocs) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *ExternalDocs) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+// A deterministic version of a JSON Schema object.
+type FileSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
+	Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
+	Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
+	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+	ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *FileSchema) Reset() {
+	*x = FileSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FileSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileSchema) ProtoMessage() {}
+
+func (x *FileSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead.
+func (*FileSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *FileSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *FileSchema) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *FileSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *FileSchema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *FileSchema) GetRequired() []string {
+	if x != nil {
+		return x.Required
+	}
+	return nil
+}
+
+func (x *FileSchema) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *FileSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *FileSchema) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *FileSchema) GetExample() *Any {
+	if x != nil {
+		return x.Example
+	}
+	return nil
+}
+
+func (x *FileSchema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type FormDataParameterSubSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Determines whether or not this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	// allows sending a parameter by name only or with an empty value.
+	AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
+	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *FormDataParameterSubSchema) Reset() {
+	*x = FormDataParameterSubSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FormDataParameterSubSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FormDataParameterSubSchema) ProtoMessage() {}
+
+func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead.
+func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *FormDataParameterSubSchema) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *FormDataParameterSubSchema) GetIn() string {
+	if x != nil {
+		return x.In
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool {
+	if x != nil {
+		return x.AllowEmptyValue
+	}
+	return false
+}
+
+func (x *FormDataParameterSubSchema) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *FormDataParameterSubSchema) GetCollectionFormat() string {
+	if x != nil {
+		return x.CollectionFormat
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *FormDataParameterSubSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *FormDataParameterSubSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *FormDataParameterSubSchema) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *FormDataParameterSubSchema) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *FormDataParameterSubSchema) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *FormDataParameterSubSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Header struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Header) Reset() {
+	*x = Header{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Header) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Header.ProtoReflect.Descriptor instead.
+func (*Header) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *Header) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Header) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *Header) GetItems() *PrimitivesItems {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *Header) GetCollectionFormat() string {
+	if x != nil {
+		return x.CollectionFormat
+	}
+	return ""
+}
+
+func (x *Header) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *Header) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *Header) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *Header) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *Header) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *Header) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *Header) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *Header) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *Header) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *Header) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *Header) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *Header) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *Header) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *Header) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Header) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type HeaderParameterSubSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Determines whether or not this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *HeaderParameterSubSchema) Reset() {
+	*x = HeaderParameterSubSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HeaderParameterSubSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameterSubSchema) ProtoMessage() {}
+
+func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead.
+func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *HeaderParameterSubSchema) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *HeaderParameterSubSchema) GetIn() string {
+	if x != nil {
+		return x.In
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *HeaderParameterSubSchema) GetCollectionFormat() string {
+	if x != nil {
+		return x.CollectionFormat
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *HeaderParameterSubSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *HeaderParameterSubSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *HeaderParameterSubSchema) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *HeaderParameterSubSchema) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *HeaderParameterSubSchema) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *HeaderParameterSubSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Headers struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *Headers) Reset() {
+	*x = Headers{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Headers) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Headers) ProtoMessage() {}
+
+func (x *Headers) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Headers.ProtoReflect.Descriptor instead.
+func (*Headers) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *Headers) GetAdditionalProperties() []*NamedHeader {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+// General information about the API.
+type Info struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A unique and precise title of the API.
+	Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	// A semantic version number of the API.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// The terms of service for the API.
+	TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"`
+	License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Info) Reset() {
+	*x = Info{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Info.ProtoReflect.Descriptor instead.
+func (*Info) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *Info) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *Info) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+func (x *Info) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+	if x != nil {
+		return x.TermsOfService
+	}
+	return ""
+}
+
+func (x *Info) GetContact() *Contact {
+	if x != nil {
+		return x.Contact
+	}
+	return nil
+}
+
+func (x *Info) GetLicense() *License {
+	if x != nil {
+		return x.License
+	}
+	return nil
+}
+
+func (x *Info) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type ItemsItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"`
+}
+
+func (x *ItemsItem) Reset() {
+	*x = ItemsItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ItemsItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ItemsItem) ProtoMessage() {}
+
+func (x *ItemsItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead.
+func (*ItemsItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *ItemsItem) GetSchema() []*Schema {
+	if x != nil {
+		return x.Schema
+	}
+	return nil
+}
+
+type JsonReference struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *JsonReference) Reset() {
+	*x = JsonReference{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *JsonReference) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JsonReference) ProtoMessage() {}
+
+func (x *JsonReference) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead.
+func (*JsonReference) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *JsonReference) GetXRef() string {
+	if x != nil {
+		return x.XRef
+	}
+	return ""
+}
+
+func (x *JsonReference) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+type License struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The name of the license type. It's encouraged to use an OSI compatible license.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The URL pointing to the license.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *License) Reset() {
+	*x = License{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *License) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use License.ProtoReflect.Descriptor instead.
+func (*License) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *License) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *License) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *License) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
+type NamedAny struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedAny) Reset() {
+	*x = NamedAny{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedAny) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedAny) ProtoMessage() {}
+
+func (x *NamedAny) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead.
+func (*NamedAny) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *NamedAny) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedAny) GetValue() *Any {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
+type NamedHeader struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedHeader) Reset() {
+	*x = NamedHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedHeader) ProtoMessage() {}
+
+func (x *NamedHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead.
+func (*NamedHeader) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *NamedHeader) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedHeader) GetValue() *Header {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
+type NamedParameter struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedParameter) Reset() {
+	*x = NamedParameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedParameter) ProtoMessage() {}
+
+func (x *NamedParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead.
+func (*NamedParameter) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *NamedParameter) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedParameter) GetValue() *Parameter {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
+type NamedPathItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedPathItem) Reset() {
+	*x = NamedPathItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedPathItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedPathItem) ProtoMessage() {}
+
+func (x *NamedPathItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead.
+func (*NamedPathItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *NamedPathItem) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedPathItem) GetValue() *PathItem {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
+type NamedResponse struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedResponse) Reset() {
+	*x = NamedResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedResponse) ProtoMessage() {}
+
+func (x *NamedResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead.
+func (*NamedResponse) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *NamedResponse) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedResponse) GetValue() *Response {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
+type NamedResponseValue struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedResponseValue) Reset() {
+	*x = NamedResponseValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedResponseValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedResponseValue) ProtoMessage() {}
+
+func (x *NamedResponseValue) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead.
+func (*NamedResponseValue) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *NamedResponseValue) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedResponseValue) GetValue() *ResponseValue {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
+type NamedSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedSchema) Reset() {
+	*x = NamedSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedSchema) ProtoMessage() {}
+
+func (x *NamedSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead.
+func (*NamedSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *NamedSchema) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedSchema) GetValue() *Schema {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
+type NamedSecurityDefinitionsItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedSecurityDefinitionsItem) Reset() {
+	*x = NamedSecurityDefinitionsItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedSecurityDefinitionsItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
+
+func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead.
+func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *NamedSecurityDefinitionsItem) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
+type NamedString struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedString) Reset() {
+	*x = NamedString{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedString) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedString) ProtoMessage() {}
+
+func (x *NamedString) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedString.ProtoReflect.Descriptor instead.
+func (*NamedString) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *NamedString) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedString) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
+type NamedStringArray struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Mapped value
+	Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *NamedStringArray) Reset() {
+	*x = NamedStringArray{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NamedStringArray) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NamedStringArray) ProtoMessage() {}
+
+func (x *NamedStringArray) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead.
+func (*NamedStringArray) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *NamedStringArray) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NamedStringArray) GetValue() *StringArray {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+type NonBodyParameter struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	//	*NonBodyParameter_HeaderParameterSubSchema
+	//	*NonBodyParameter_FormDataParameterSubSchema
+	//	*NonBodyParameter_QueryParameterSubSchema
+	//	*NonBodyParameter_PathParameterSubSchema
+	Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *NonBodyParameter) Reset() {
+	*x = NonBodyParameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NonBodyParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NonBodyParameter) ProtoMessage() {}
+
+func (x *NonBodyParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead.
+func (*NonBodyParameter) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30}
+}
+
+func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema {
+	if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok {
+		return x.HeaderParameterSubSchema
+	}
+	return nil
+}
+
+func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema {
+	if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok {
+		return x.FormDataParameterSubSchema
+	}
+	return nil
+}
+
+func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema {
+	if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok {
+		return x.QueryParameterSubSchema
+	}
+	return nil
+}
+
+func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
+	if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok {
+		return x.PathParameterSubSchema
+	}
+	return nil
+}
+
+type isNonBodyParameter_Oneof interface {
+	isNonBodyParameter_Oneof()
+}
+
+type NonBodyParameter_HeaderParameterSubSchema struct {
+	HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"`
+}
+
+type NonBodyParameter_FormDataParameterSubSchema struct {
+	FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"`
+}
+
+type NonBodyParameter_QueryParameterSubSchema struct {
+	QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"`
+}
+
+type NonBodyParameter_PathParameterSubSchema struct {
+	PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"`
+}
+
+func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+type Oauth2AccessCodeSecurity struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Oauth2AccessCodeSecurity) Reset() {
+	*x = Oauth2AccessCodeSecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Oauth2AccessCodeSecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
+
+func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *Oauth2AccessCodeSecurity) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Oauth2AccessCodeSecurity) GetFlow() string {
+	if x != nil {
+		return x.Flow
+	}
+	return ""
+}
+
+func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes {
+	if x != nil {
+		return x.Scopes
+	}
+	return nil
+}
+
+func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string {
+	if x != nil {
+		return x.AuthorizationUrl
+	}
+	return ""
+}
+
+func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string {
+	if x != nil {
+		return x.TokenUrl
+	}
+	return ""
+}
+
+func (x *Oauth2AccessCodeSecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2ApplicationSecurity struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Oauth2ApplicationSecurity) Reset() {
+	*x = Oauth2ApplicationSecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Oauth2ApplicationSecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Oauth2ApplicationSecurity) ProtoMessage() {}
+
+func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *Oauth2ApplicationSecurity) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Oauth2ApplicationSecurity) GetFlow() string {
+	if x != nil {
+		return x.Flow
+	}
+	return ""
+}
+
+func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes {
+	if x != nil {
+		return x.Scopes
+	}
+	return nil
+}
+
+func (x *Oauth2ApplicationSecurity) GetTokenUrl() string {
+	if x != nil {
+		return x.TokenUrl
+	}
+	return ""
+}
+
+func (x *Oauth2ApplicationSecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2ImplicitSecurity struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Oauth2ImplicitSecurity) Reset() {
+	*x = Oauth2ImplicitSecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Oauth2ImplicitSecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Oauth2ImplicitSecurity) ProtoMessage() {}
+
+func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *Oauth2ImplicitSecurity) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Oauth2ImplicitSecurity) GetFlow() string {
+	if x != nil {
+		return x.Flow
+	}
+	return ""
+}
+
+func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes {
+	if x != nil {
+		return x.Scopes
+	}
+	return nil
+}
+
+func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string {
+	if x != nil {
+		return x.AuthorizationUrl
+	}
+	return ""
+}
+
+func (x *Oauth2ImplicitSecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2PasswordSecurity struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Oauth2PasswordSecurity) Reset() {
+	*x = Oauth2PasswordSecurity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Oauth2PasswordSecurity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Oauth2PasswordSecurity) ProtoMessage() {}
+
+func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead.
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *Oauth2PasswordSecurity) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Oauth2PasswordSecurity) GetFlow() string {
+	if x != nil {
+		return x.Flow
+	}
+	return ""
+}
+
+func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes {
+	if x != nil {
+		return x.Scopes
+	}
+	return nil
+}
+
+func (x *Oauth2PasswordSecurity) GetTokenUrl() string {
+	if x != nil {
+		return x.TokenUrl
+	}
+	return ""
+}
+
+func (x *Oauth2PasswordSecurity) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2Scopes struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *Oauth2Scopes) Reset() {
+	*x = Oauth2Scopes{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Oauth2Scopes) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Oauth2Scopes) ProtoMessage() {}
+
+func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead.
+func (*Oauth2Scopes) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type Operation struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+	// A brief summary of the operation.
+	Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+	// A longer description of the operation, GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// A unique identifier of the operation.
+	OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	// A list of MIME types the API can produce.
+	Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"`
+	// A list of MIME types the API can consume.
+	Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	// The parameters needed to send a valid API call.
+	Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"`
+	Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"`
+	// The transfer protocol of the API.
+	Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"`
+	Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Operation) Reset() {
+	*x = Operation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Operation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Operation.ProtoReflect.Descriptor instead.
+func (*Operation) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *Operation) GetTags() []string {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
+func (x *Operation) GetSummary() string {
+	if x != nil {
+		return x.Summary
+	}
+	return ""
+}
+
+func (x *Operation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Operation) GetOperationId() string {
+	if x != nil {
+		return x.OperationId
+	}
+	return ""
+}
+
+func (x *Operation) GetProduces() []string {
+	if x != nil {
+		return x.Produces
+	}
+	return nil
+}
+
+func (x *Operation) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
+	}
+	return nil
+}
+
+func (x *Operation) GetParameters() []*ParametersItem {
+	if x != nil {
+		return x.Parameters
+	}
+	return nil
+}
+
+func (x *Operation) GetResponses() *Responses {
+	if x != nil {
+		return x.Responses
+	}
+	return nil
+}
+
+func (x *Operation) GetSchemes() []string {
+	if x != nil {
+		return x.Schemes
+	}
+	return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+	if x != nil {
+		return x.Deprecated
+	}
+	return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *Operation) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Parameter struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	//	*Parameter_BodyParameter
+	//	*Parameter_NonBodyParameter
+	Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *Parameter) Reset() {
+	*x = Parameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Parameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameter) ProtoMessage() {}
+
+func (x *Parameter) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Parameter.ProtoReflect.Descriptor instead.
+func (*Parameter) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37}
+}
+
+func (m *Parameter) GetOneof() isParameter_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *Parameter) GetBodyParameter() *BodyParameter {
+	if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok {
+		return x.BodyParameter
+	}
+	return nil
+}
+
+func (x *Parameter) GetNonBodyParameter() *NonBodyParameter {
+	if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok {
+		return x.NonBodyParameter
+	}
+	return nil
+}
+
+type isParameter_Oneof interface {
+	isParameter_Oneof()
+}
+
+type Parameter_BodyParameter struct {
+	BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"`
+}
+
+type Parameter_NonBodyParameter struct {
+	NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"`
+}
+
+func (*Parameter_BodyParameter) isParameter_Oneof() {}
+
+func (*Parameter_NonBodyParameter) isParameter_Oneof() {}
+
+// One or more JSON representations for parameters
+type ParameterDefinitions struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *ParameterDefinitions) Reset() {
+	*x = ParameterDefinitions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ParameterDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParameterDefinitions) ProtoMessage() {}
+
+func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead.
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type ParametersItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	// *ParametersItem_Parameter
+	// *ParametersItem_JsonReference
+	Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *ParametersItem) Reset() {
+	*x = ParametersItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ParametersItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParametersItem) ProtoMessage() {}
+
+func (x *ParametersItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead.
+func (*ParametersItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39}
+}
+
+func (m *ParametersItem) GetOneof() isParametersItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *ParametersItem) GetParameter() *Parameter {
+	if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok {
+		return x.Parameter
+	}
+	return nil
+}
+
+func (x *ParametersItem) GetJsonReference() *JsonReference {
+	if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok {
+		return x.JsonReference
+	}
+	return nil
+}
+
+type isParametersItem_Oneof interface {
+	isParametersItem_Oneof()
+}
+
+type ParametersItem_Parameter struct {
+	Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"`
+}
+
+type ParametersItem_JsonReference struct {
+	JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"`
+}
+
+func (*ParametersItem_Parameter) isParametersItem_Oneof() {}
+
+func (*ParametersItem_JsonReference) isParametersItem_Oneof() {}
+
+type PathItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
+	Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"`
+	Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"`
+	Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"`
+	Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"`
+	Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"`
+	Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"`
+	Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"`
+	// The parameters needed to send a valid API call.
+	Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *PathItem) Reset() {
+	*x = PathItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PathItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PathItem) ProtoMessage() {}
+
+func (x *PathItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PathItem.ProtoReflect.Descriptor instead.
+func (*PathItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40}
+}
+
+func (x *PathItem) GetXRef() string {
+	if x != nil {
+		return x.XRef
+	}
+	return ""
+}
+
+func (x *PathItem) GetGet() *Operation {
+	if x != nil {
+		return x.Get
+	}
+	return nil
+}
+
+func (x *PathItem) GetPut() *Operation {
+	if x != nil {
+		return x.Put
+	}
+	return nil
+}
+
+func (x *PathItem) GetPost() *Operation {
+	if x != nil {
+		return x.Post
+	}
+	return nil
+}
+
+func (x *PathItem) GetDelete() *Operation {
+	if x != nil {
+		return x.Delete
+	}
+	return nil
+}
+
+func (x *PathItem) GetOptions() *Operation {
+	if x != nil {
+		return x.Options
+	}
+	return nil
+}
+
+func (x *PathItem) GetHead() *Operation {
+	if x != nil {
+		return x.Head
+	}
+	return nil
+}
+
+func (x *PathItem) GetPatch() *Operation {
+	if x != nil {
+		return x.Patch
+	}
+	return nil
+}
+
+func (x *PathItem) GetParameters() []*ParametersItem {
+	if x != nil {
+		return x.Parameters
+	}
+	return nil
+}
+
+func (x *PathItem) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type PathParameterSubSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Determines whether or not this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *PathParameterSubSchema) Reset() {
+	*x = PathParameterSubSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PathParameterSubSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PathParameterSubSchema) ProtoMessage() {}
+
+func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead.
+func (*PathParameterSubSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41}
+}
+
+func (x *PathParameterSubSchema) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *PathParameterSubSchema) GetIn() string {
+	if x != nil {
+		return x.In
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetItems() *PrimitivesItems {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *PathParameterSubSchema) GetCollectionFormat() string {
+	if x != nil {
+		return x.CollectionFormat
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *PathParameterSubSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *PathParameterSubSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *PathParameterSubSchema) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *PathParameterSubSchema) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *PathParameterSubSchema) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *PathParameterSubSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
+type Paths struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *Paths) Reset() {
+	*x = Paths{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Paths) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Paths) ProtoMessage() {}
+
+func (x *Paths) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Paths.ProtoReflect.Descriptor instead.
+func (*Paths) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42}
+}
+
+func (x *Paths) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+func (x *Paths) GetPath() []*NamedPathItem {
+	if x != nil {
+		return x.Path
+	}
+	return nil
+}
+
+type PrimitivesItems struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *PrimitivesItems) Reset() {
+	*x = PrimitivesItems{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PrimitivesItems) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrimitivesItems) ProtoMessage() {}
+
+func (x *PrimitivesItems) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead.
+func (*PrimitivesItems) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43}
+}
+
+func (x *PrimitivesItems) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *PrimitivesItems) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *PrimitivesItems) GetItems() *PrimitivesItems {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *PrimitivesItems) GetCollectionFormat() string {
+	if x != nil {
+		return x.CollectionFormat
+	}
+	return ""
+}
+
+func (x *PrimitivesItems) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *PrimitivesItems) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *PrimitivesItems) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *PrimitivesItems) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *PrimitivesItems) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *PrimitivesItems) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *PrimitivesItems) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *PrimitivesItems) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Properties struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *Properties) Reset() {
+	*x = Properties{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Properties) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Properties) ProtoMessage() {}
+
+func (x *Properties) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Properties.ProtoReflect.Descriptor instead.
+func (*Properties) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44}
+}
+
+func (x *Properties) GetAdditionalProperties() []*NamedSchema {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type QueryParameterSubSchema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Determines whether or not this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	// allows sending a parameter by name only or with an empty value.
+	AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
+	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *QueryParameterSubSchema) Reset() {
+	*x = QueryParameterSubSchema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QueryParameterSubSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryParameterSubSchema) ProtoMessage() {}
+
+func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead.
+func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45}
+}
+
+func (x *QueryParameterSubSchema) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *QueryParameterSubSchema) GetIn() string {
+	if x != nil {
+		return x.In
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool {
+	if x != nil {
+		return x.AllowEmptyValue
+	}
+	return false
+}
+
+func (x *QueryParameterSubSchema) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *QueryParameterSubSchema) GetCollectionFormat() string {
+	if x != nil {
+		return x.CollectionFormat
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *QueryParameterSubSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *QueryParameterSubSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *QueryParameterSubSchema) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *QueryParameterSubSchema) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *QueryParameterSubSchema) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *QueryParameterSubSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type Response struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+	Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"`
+	Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Response) Reset() {
+	*x = Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Response.ProtoReflect.Descriptor instead.
+func (*Response) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46}
+}
+
+func (x *Response) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Response) GetSchema() *SchemaItem {
+	if x != nil {
+		return x.Schema
+	}
+	return nil
+}
+
+func (x *Response) GetHeaders() *Headers {
+	if x != nil {
+		return x.Headers
+	}
+	return nil
+}
+
+func (x *Response) GetExamples() *Examples {
+	if x != nil {
+		return x.Examples
+	}
+	return nil
+}
+
+func (x *Response) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+// One or more JSON representations for responses
+type ResponseDefinitions struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *ResponseDefinitions) Reset() {
+	*x = ResponseDefinitions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResponseDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResponseDefinitions) ProtoMessage() {}
+
+func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead.
+func (*ResponseDefinitions) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47}
+}
+
+func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type ResponseValue struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	// *ResponseValue_Response
+	// *ResponseValue_JsonReference
+	Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *ResponseValue) Reset() {
+	*x = ResponseValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResponseValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResponseValue) ProtoMessage() {}
+
+func (x *ResponseValue) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead.
+func (*ResponseValue) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48}
+}
+
+func (m *ResponseValue) GetOneof() isResponseValue_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *ResponseValue) GetResponse() *Response {
+	if x, ok := x.GetOneof().(*ResponseValue_Response); ok {
+		return x.Response
+	}
+	return nil
+}
+
+func (x *ResponseValue) GetJsonReference() *JsonReference {
+	if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok {
+		return x.JsonReference
+	}
+	return nil
+}
+
+type isResponseValue_Oneof interface {
+	isResponseValue_Oneof()
+}
+
+type ResponseValue_Response struct {
+	Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"`
+}
+
+type ResponseValue_JsonReference struct {
+	JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"`
+}
+
+func (*ResponseValue_Response) isResponseValue_Oneof() {}
+
+func (*ResponseValue_JsonReference) isResponseValue_Oneof() {}
+
+// Response objects names can either be any valid HTTP status code or 'default'.
+type Responses struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Responses) Reset() {
+	*x = Responses{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Responses) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Responses) ProtoMessage() {}
+
+func (x *Responses) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Responses.ProtoReflect.Descriptor instead.
+func (*Responses) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49}
+}
+
+func (x *Responses) GetResponseCode() []*NamedResponseValue {
+	if x != nil {
+		return x.ResponseCode
+	}
+	return nil
+}
+
+func (x *Responses) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+// A deterministic version of a JSON Schema object.
+type Schema struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
+	Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+	Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+	Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"`
+	Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
+	AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+	Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"`
+	Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"`
+	AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"`
+	Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"`
+	Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+	ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Schema) Reset() {
+	*x = Schema{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Schema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Schema.ProtoReflect.Descriptor instead.
+func (*Schema) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50}
+}
+
+func (x *Schema) GetXRef() string {
+	if x != nil {
+		return x.XRef
+	}
+	return ""
+}
+
+func (x *Schema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *Schema) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *Schema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Schema) GetDefault() *Any {
+	if x != nil {
+		return x.Default
+	}
+	return nil
+}
+
+func (x *Schema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *Schema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *Schema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *Schema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *Schema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *Schema) GetMaxLength() int64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *Schema) GetMinLength() int64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *Schema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *Schema) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *Schema) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *Schema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *Schema) GetMaxProperties() int64 {
+	if x != nil {
+		return x.MaxProperties
+	}
+	return 0
+}
+
+func (x *Schema) GetMinProperties() int64 {
+	if x != nil {
+		return x.MinProperties
+	}
+	return 0
+}
+
+func (x *Schema) GetRequired() []string {
+	if x != nil {
+		return x.Required
+	}
+	return nil
+}
+
+func (x *Schema) GetEnum() []*Any {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+func (x *Schema) GetType() *TypeItem {
+	if x != nil {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *Schema) GetItems() *ItemsItem {
+	if x != nil {
+		return x.Items
+	}
+	return nil
+}
+
+func (x *Schema) GetAllOf() []*Schema {
+	if x != nil {
+		return x.AllOf
+	}
+	return nil
+}
+
+func (x *Schema) GetProperties() *Properties {
+	if x != nil {
+		return x.Properties
+	}
+	return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+	if x != nil {
+		return x.Discriminator
+	}
+	return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *Schema) GetXml() *Xml {
+	if x != nil {
+		return x.Xml
+	}
+	return nil
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Schema) GetExample() *Any {
+	if x != nil {
+		return x.Example
+	}
+	return nil
+}
+
+func (x *Schema) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type SchemaItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	// *SchemaItem_Schema
+	// *SchemaItem_FileSchema
+	Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *SchemaItem) Reset() {
+	*x = SchemaItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SchemaItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SchemaItem) ProtoMessage() {}
+
+func (x *SchemaItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead.
+func (*SchemaItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51}
+}
+
+func (m *SchemaItem) GetOneof() isSchemaItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *SchemaItem) GetSchema() *Schema {
+	if x, ok := x.GetOneof().(*SchemaItem_Schema); ok {
+		return x.Schema
+	}
+	return nil
+}
+
+func (x *SchemaItem) GetFileSchema() *FileSchema {
+	if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok {
+		return x.FileSchema
+	}
+	return nil
+}
+
+type isSchemaItem_Oneof interface {
+	isSchemaItem_Oneof()
+}
+
+type SchemaItem_Schema struct {
+	Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"`
+}
+
+type SchemaItem_FileSchema struct {
+	FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"`
+}
+
+func (*SchemaItem_Schema) isSchemaItem_Oneof() {}
+
+func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {}
+
+type SecurityDefinitions struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *SecurityDefinitions) Reset() {
+	*x = SecurityDefinitions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SecurityDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead.
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52}
+}
+
+func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type SecurityDefinitionsItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Oneof:
+	// *SecurityDefinitionsItem_BasicAuthenticationSecurity
+	// *SecurityDefinitionsItem_ApiKeySecurity
+	// *SecurityDefinitionsItem_Oauth2ImplicitSecurity
+	// *SecurityDefinitionsItem_Oauth2PasswordSecurity
+	// *SecurityDefinitionsItem_Oauth2ApplicationSecurity
+	// *SecurityDefinitionsItem_Oauth2AccessCodeSecurity
+	Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (x *SecurityDefinitionsItem) Reset() {
+	*x = SecurityDefinitionsItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SecurityDefinitionsItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitionsItem) ProtoMessage() {}
+
+func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead.
+func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53}
+}
+
+func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity {
+	if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok {
+		return x.BasicAuthenticationSecurity
+	}
+	return nil
+}
+
+func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity {
+	if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok {
+		return x.ApiKeySecurity
+	}
+	return nil
+}
+
+func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity {
+	if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok {
+		return x.Oauth2ImplicitSecurity
+	}
+	return nil
+}
+
+func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity {
+	if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok {
+		return x.Oauth2PasswordSecurity
+	}
+	return nil
+}
+
+func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity {
+	if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok {
+		return x.Oauth2ApplicationSecurity
+	}
+	return nil
+}
+
+func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity {
+	if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok {
+		return x.Oauth2AccessCodeSecurity
+	}
+	return nil
+}
+
+type isSecurityDefinitionsItem_Oneof interface {
+	isSecurityDefinitionsItem_Oneof()
+}
+
+type SecurityDefinitionsItem_BasicAuthenticationSecurity struct {
+	BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"`
+}
+
+type SecurityDefinitionsItem_ApiKeySecurity struct {
+	ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"`
+}
+
+type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct {
+	Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"`
+}
+
+type SecurityDefinitionsItem_Oauth2PasswordSecurity struct {
+	Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"`
+}
+
+type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct {
+	Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"`
+}
+
+type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct {
+	Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"`
+}
+
+func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {}
+
+func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {}
+
+func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {}
+
+func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {}
+
+func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {}
+
+func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {}
+
+type SecurityRequirement struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead.
+func (*SecurityRequirement) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54}
+}
+
+func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type StringArray struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *StringArray) Reset() {
+	*x = StringArray{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StringArray) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringArray) ProtoMessage() {}
+
+func (x *StringArray) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringArray.ProtoReflect.Descriptor instead.
+func (*StringArray) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55}
+}
+
+func (x *StringArray) GetValue() []string {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+type Tag struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Tag) Reset() {
+	*x = Tag{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Tag) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Tag.ProtoReflect.Descriptor instead.
+func (*Tag) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56}
+}
+
+func (x *Tag) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Tag) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Tag) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+type TypeItem struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *TypeItem) Reset() {
+	*x = TypeItem{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TypeItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypeItem) ProtoMessage() {}
+
+func (x *TypeItem) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead.
+func (*TypeItem) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57}
+}
+
+func (x *TypeItem) GetValue() []string {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Any property starting with x- is valid.
+type VendorExtension struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *VendorExtension) Reset() {
+	*x = VendorExtension{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VendorExtension) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VendorExtension) ProtoMessage() {}
+
+func (x *VendorExtension) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead.
+func (*VendorExtension) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58}
+}
+
+func (x *VendorExtension) GetAdditionalProperties() []*NamedAny {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+type Xml struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name            string      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Namespace       string      `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	Prefix          string      `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
+	Attribute       bool        `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
+	Wrapped         bool        `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+}
+
+func (x *Xml) Reset() {
+	*x = Xml{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Xml) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Xml) ProtoMessage() {}
+
+func (x *Xml) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Xml.ProtoReflect.Descriptor instead.
+func (*Xml) Descriptor() ([]byte, []int) {
+	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59}
+}
+
+func (x *Xml) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Xml) GetNamespace() string {
+	if x != nil {
+		return x.Namespace
+	}
+	return ""
+}
+
+func (x *Xml) GetPrefix() string {
+	if x != nil {
+		return x.Prefix
+	}
+	return ""
+}
+
+func (x *Xml) GetAttribute() bool {
+	if x != nil {
+		return x.Attribute
+	}
+	return false
+}
+
+func (x *Xml) GetWrapped() bool {
+	if x != nil {
+		return x.Wrapped
+	}
+	return false
+}
+
+func (x *Xml) GetVendorExtension() []*NamedAny {
+	if x != nil {
+		return x.VendorExtension
+	}
+	return nil
+}
+
+var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor
+
+var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e,
+	0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+	0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c,
+	0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07,
+	0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52,
+	0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65,
0x6f, + 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, + 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, + 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x06, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, + 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, + 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, + 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, + 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 
0x68, + 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 
0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, + 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 
0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, + 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, 0x29, 0x0a, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, + 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 
0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, + 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, + 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, + 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, + 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, + 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, + 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, + 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, + 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, + 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, + 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, + 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, + 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, + 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 
0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, + 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f, + 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01, + 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, + 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x04, 0x70, 0x6f, 0x73, + 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05, + 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, + 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, + 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, + 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, + 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, + 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, + 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05, + 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8, + 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 
0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52, + 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 
0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, + 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, + 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66, + 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, + 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03, + 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12, + 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, + 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 
0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, + 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, + 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 
0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, + 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, + 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once + file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc +) + +func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { + file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { + file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) + }) + return file_openapiv2_OpenAPIv2_proto_rawDescData +} + +var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v2.Any + (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity + (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity + (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter + (*Contact)(nil), // 5: openapi.v2.Contact + 
(*Default)(nil), // 6: openapi.v2.Default + (*Definitions)(nil), // 7: openapi.v2.Definitions + (*Document)(nil), // 8: openapi.v2.Document + (*Examples)(nil), // 9: openapi.v2.Examples + (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs + (*FileSchema)(nil), // 11: openapi.v2.FileSchema + (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema + (*Header)(nil), // 13: openapi.v2.Header + (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema + (*Headers)(nil), // 15: openapi.v2.Headers + (*Info)(nil), // 16: openapi.v2.Info + (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem + (*JsonReference)(nil), // 18: openapi.v2.JsonReference + (*License)(nil), // 19: openapi.v2.License + (*NamedAny)(nil), // 20: openapi.v2.NamedAny + (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader + (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter + (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem + (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse + (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue + (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema + (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem + (*NamedString)(nil), // 28: openapi.v2.NamedString + (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray + (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter + (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity + (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity + (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity + (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity + (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes + (*Operation)(nil), // 36: openapi.v2.Operation + (*Parameter)(nil), // 37: openapi.v2.Parameter + (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions + (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem + (*PathItem)(nil), // 40: openapi.v2.PathItem + (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema + (*Paths)(nil), // 42: openapi.v2.Paths + (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems + (*Properties)(nil), // 44: openapi.v2.Properties + (*QueryParameterSubSchema)(nil), // 45: openapi.v2.QueryParameterSubSchema + (*Response)(nil), // 46: openapi.v2.Response + (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions + (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue + (*Responses)(nil), // 49: openapi.v2.Responses + (*Schema)(nil), // 50: openapi.v2.Schema + (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem + (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions + (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem + (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement + (*StringArray)(nil), // 55: openapi.v2.StringArray + (*Tag)(nil), // 56: openapi.v2.Tag + (*TypeItem)(nil), // 57: openapi.v2.TypeItem + (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension + (*Xml)(nil), // 59: openapi.v2.Xml + (*anypb.Any)(nil), // 60: google.protobuf.Any +} +var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ + 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema + 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any + 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 50, 
// 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema + 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny + 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema + 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info + 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths + 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions + 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions + 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions + 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement + 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions + 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag + 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any + 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any + 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems + 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any + 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any + 20, // 32: openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader + 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact + 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License + 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema + 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any + 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header + 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter + 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem + 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response 
+ 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue + 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema + 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem + 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray + 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema + 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema + 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema + 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema + 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString + 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs + 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem + 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses + 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement + 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny + 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter + 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter + 22, // 72: openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter + 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter + 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference + 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation + 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation + 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation + 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation + 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation + 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation + 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation + 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem + 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 87: 
openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny + 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem + 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems + 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any + 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any + 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny + 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema + 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem + 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers + 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples + 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny + 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse + 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response + 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference + 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue + 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any + 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any + 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem + 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem + 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem + 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema + 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties + 59, // 115: openapi.v2.Schema.xml:type_name -> openapi.v2.Xml + 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any + 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema + 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema + 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem + 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity + 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity + 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity + 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity + 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity + 31, // 127: 
openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity + 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray + 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny + 133, // [133:133] is the sub-list for method output_type + 133, // [133:133] is the sub-list for method input_type + 133, // [133:133] is the sub-list for extension type_name + 133, // [133:133] is the sub-list for extension extendee + 0, // [0:133] is the sub-list for field type_name +} + +func init() { file_openapiv2_OpenAPIv2_proto_init() } +func file_openapiv2_OpenAPIv2_proto_init() { + if File_openapiv2_OpenAPIv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiKeySecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BasicAuthenticationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Default); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Definitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Examples); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FormDataParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Headers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonBodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2AccessCodeSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ApplicationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ImplicitSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2PasswordSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2Scopes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrimitivesItems); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VendorExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = 
[]interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, + NumEnums: 0, + NumMessages: 60, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, + DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, + MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, + }.Build() + File_openapiv2_OpenAPIv2_proto = out.File + file_openapiv2_OpenAPIv2_proto_rawDesc = nil + file_openapiv2_OpenAPIv2_proto_goTypes = nil + file_openapiv2_OpenAPIv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto new file mode 100644 index 00000000000..1c59b2f4ae1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto @@ -0,0 +1,666 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. 
+option go_package = "./openapiv2;openapi_v2"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for responses +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension {
+  repeated NamedAny additional_properties = 1;
+}
+
+message Xml {
+  string name = 1;
+  string namespace = 2;
+  string prefix = 3;
+  bool attribute = 4;
+  bool wrapped = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/README.md b/vendor/github.com/googleapis/gnostic/openapiv2/README.md
new file mode 100644
index 00000000000..5276128d3b8
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/README.md
@@ -0,0 +1,14 @@
+# OpenAPI v2 Protocol Buffer Models
+
+This directory contains a Protocol Buffer-language model and related code for
+supporting OpenAPI v2.
+
+Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
+Buffer support code for their preferred languages.
+
+OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
+the Protocol Buffer-based data structures generated from OpenAPIv2.proto.
+
+OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
+generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
+compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/document.go b/vendor/github.com/googleapis/gnostic/openapiv2/document.go
new file mode 100644
index 00000000000..56e5966b4cb
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/document.go
@@ -0,0 +1,41 @@
+// Copyright 2020 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openapi_v2
+
+import (
+	"github.com/googleapis/gnostic/compiler"
+	"gopkg.in/yaml.v3"
+)
+
+// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
+func ParseDocument(b []byte) (*Document, error) {
+	info, err := compiler.ReadInfoFromBytes("", b)
+	if err != nil {
+		return nil, err
+	}
+	root := info.Content[0]
+	return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
+}
+
+// YAMLValue produces a serialized YAML representation of the document.
+func (d *Document) YAMLValue(comment string) ([]byte, error) {
+	rawInfo := d.ToRawInfo()
+	rawInfo = &yaml.Node{
+		Kind:        yaml.DocumentNode,
+		Content:     []*yaml.Node{rawInfo},
+		HeadComment: comment,
+	}
+	return yaml.Marshal(rawInfo)
+}
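The README and document.go above describe this package's two entry points. For orientation, a minimal sketch of how a consumer might drive them: the swagger.yaml input file is hypothetical; ParseDocument and YAMLValue come verbatim from the vendored document.go, and the GetSwagger/GetInfo accessors are the standard getters that protoc-gen-go emits for the Document message.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
)

func main() {
	// Read a Swagger 2.0 description; JSON or YAML both work, since
	// compiler.ReadInfoFromBytes normalizes either into a yaml.Node tree.
	b, err := ioutil.ReadFile("swagger.yaml") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}

	// ParseDocument builds the generated openapi_v2.Document message.
	doc, err := openapi_v2.ParseDocument(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("swagger %s: %s\n", doc.GetSwagger(), doc.GetInfo().GetTitle())

	// YAMLValue round-trips the document back to YAML, prefixed with a
	// head comment.
	out, err := doc.YAMLValue("regenerated from swagger.yaml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```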
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json
new file mode 100644
index 00000000000..afa12b79b8f
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json
@@ -0,0 +1,1610 @@
+{
+  "title": "A JSON Schema for Swagger 2.0 API.",
+  "id": "http://swagger.io/v2/schema.json#",
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "type": "object",
+  "required": [
+    "swagger",
+    "info",
+    "paths"
+  ],
+  "additionalProperties": false,
+  "patternProperties": {
+    "^x-": {
+      "$ref": "#/definitions/vendorExtension"
+    }
+  },
+  "properties": {
+    "swagger": {
+      "type": "string",
+      "enum": [
+        "2.0"
+      ],
+      "description": "The Swagger version of this document."
+    },
+    "info": {
+      "$ref": "#/definitions/info"
+    },
+    "host": {
+      "type": "string",
+      "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+      "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+    },
+    "basePath": {
+      "type": "string",
+      "pattern": "^/",
+      "description": "The base path to the API. Example: '/api'."
+    },
+    "schemes": {
+      "$ref": "#/definitions/schemesList"
+    },
+    "consumes": {
+      "description": "A list of MIME types accepted by the API.",
+      "allOf": [
+        {
+          "$ref": "#/definitions/mediaTypeList"
+        }
+      ]
+    },
+    "produces": {
+      "description": "A list of MIME types the API can produce.",
+      "allOf": [
+        {
+          "$ref": "#/definitions/mediaTypeList"
+        }
+      ]
+    },
+    "paths": {
+      "$ref": "#/definitions/paths"
+    },
+    "definitions": {
+      "$ref": "#/definitions/definitions"
+    },
+    "parameters": {
+      "$ref": "#/definitions/parameterDefinitions"
+    },
+    "responses": {
+      "$ref": "#/definitions/responseDefinitions"
+    },
+    "security": {
+      "$ref": "#/definitions/security"
+    },
+    "securityDefinitions": {
+      "$ref": "#/definitions/securityDefinitions"
+    },
+    "tags": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/tag"
+      },
+      "uniqueItems": true
+    },
+    "externalDocs": {
+      "$ref": "#/definitions/externalDocs"
+    }
+  },
+  "definitions": {
+    "info": {
+      "type": "object",
+      "description": "General information about the API.",
+      "required": [
+        "version",
+        "title"
+      ],
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "title": {
+          "type": "string",
+          "description": "A unique and precise title of the API."
+        },
+        "version": {
+          "type": "string",
+          "description": "A semantic version number of the API."
+        },
+        "description": {
+          "type": "string",
+          "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed."
+        },
+        "termsOfService": {
+          "type": "string",
+          "description": "The terms of service for the API."
+        },
+        "contact": {
+          "$ref": "#/definitions/contact"
+        },
+        "license": {
+          "$ref": "#/definitions/license"
+        }
+      }
+    },
+    "contact": {
+      "type": "object",
+      "description": "Contact information for the owners of the API.",
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The identifying name of the contact person/organization."
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml new file mode 100644 index 00000000000..597bc9996f0 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: 
go +matrix: + allow_failures: + - go: master + fast_finish: true + include: + - go: 1.10.x + - go: 1.11.x + env: GOFMT=1 + - go: master +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 00000000000..81316beb0cb --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 00000000000..09c9e7c173a --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,25 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. +- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). 
+- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. +- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). + +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 00000000000..42e3129d823 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 00000000000..b41a63d1ff5 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,551 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). +// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. 
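+// Any backend that satisfies Get, Set, and Delete can be plugged into a +// Transport. As a hedged, minimal sketch (illustrative caller code, not part +// of this package; myCache stands for any value implementing Cache): +// +// t := httpcache.NewTransport(myCache) +// client := t.Client() +// resp, err := client.Get("https://example.com/") // may be served from myCache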
+type Cache interface { + // Get returns the []byte representation of a cached response and a bool + // set to true if the value isn't empty + Get(key string) (responseBytes []byte, ok bool) + // Set stores the []byte representation of a response against a key + Set(key string, responseBytes []byte) + // Delete removes the value associated with the key + Delete(key string) +} + +// cacheKey returns the cache key for req. +func cacheKey(req *http.Request) string { + if req.Method == http.MethodGet { + return req.URL.String() + } else { + return req.Method + " " + req.URL.String() + } +} + +// CachedResponse returns the cached http.Response for req if present, and nil +// otherwise. +func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { + cachedVal, ok := c.Get(cacheKey(req)) + if !ok { + return + } + + b := bytes.NewBuffer(cachedVal) + return http.ReadResponse(bufio.NewReader(b), req) +} + +// MemoryCache is an implementation of Cache that stores responses in an in-memory map. +type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. +func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified.
If this happens, then the cached Response +// will be returned. +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. 
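+// It is returned by Date below. Note that getFreshness treats a response whose +// Date header is missing or unparseable as stale, since its age cannot be computed.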
+var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. +func Date(respHeaders http.Header) (date time.Time, err error) { + dateHeader := respHeaders.Get("date") + if dateHeader == "" { + err = ErrNoDateHeader + return + } + + return time.Parse(time.RFC1123, dateHeader) +} + +type realClock struct{} + +func (c *realClock) since(d time.Time) time.Duration { + return time.Since(d) +} + +type timer interface { + since(d time.Time) time.Duration +} + +var clock timer = &realClock{} + +// getFreshness will return one of fresh/stale/transparent based on the cache-control +// values of the request and the response +// +// fresh indicates the response can be returned +// stale indicates that the response needs validating before it is returned +// transparent indicates the response should not be used to fulfil the request +// +// Because this is only a private cache, 'public' and 'private' in cache-control aren't +// significant. Similarly, s-maxage isn't used. +func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + if _, ok := reqCacheControl["no-cache"]; ok { + return transparent + } + if _, ok := respCacheControl["no-cache"]; ok { + return stale + } + if _, ok := reqCacheControl["only-if-cached"]; ok { + return fresh + } + + date, err := Date(respHeaders) + if err != nil { + return stale + } + currentAge := clock.since(date) + + var lifetime time.Duration + var zeroDuration time.Duration + + // If a response includes both an Expires header and a max-age directive, + // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. + minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here.
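+ // A bare max-stale accepts a stale response of any age, while a valued + // max-stale widens the window by reducing the computed current age by + // that many seconds.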
+ if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Te": {}, + "Trailers": {}, + "Transfer-Encoding": {}, + "Upgrade": {}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
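+// http.RoundTripper implementations must not modify the caller's request, so +// RoundTrip sets validator headers (if-none-match / if-modified-since) on a +// clone rather than on the original request.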
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. +func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/gregjones/httpcache/redis/redis.go b/vendor/github.com/gregjones/httpcache/redis/redis.go new file mode 100644 index 00000000000..3d69c6c7bca --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/redis/redis.go @@ -0,0 +1,43 @@ +// Package redis provides a redis interface for http caching. +package redis + +import ( + "github.com/gomodule/redigo/redis" + "github.com/gregjones/httpcache" +) + +// cache is an implementation of httpcache.Cache that caches responses in a +// redis server. +type cache struct { + redis.Conn +} + +// cacheKey modifies an httpcache key for use in redis. Specifically, it +// prefixes keys to avoid collision with other data stored in redis. +func cacheKey(key string) string { + return "rediscache:" + key +} + +// Get returns the response corresponding to key if present. 
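+// Any redis error, including a plain cache miss (nil reply), is reported to +// the caller as ok == false.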
+func (c cache) Get(key string) (resp []byte, ok bool) { + item, err := redis.Bytes(c.Do("GET", cacheKey(key))) + if err != nil { + return nil, false + } + return item, true +} + +// Set saves a response to the cache as key. +func (c cache) Set(key string, resp []byte) { + c.Do("SET", cacheKey(key), resp) +} + +// Delete removes the response with key from the cache. +func (c cache) Delete(key string) { + c.Do("DEL", cacheKey(key)) +} + +// NewWithClient returns a new Cache with the given redis connection. +func NewWithClient(client redis.Conn) httpcache.Cache { + return cache{client} +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 00000000000..036e5313fc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. 
With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 00000000000..8d306bf5134 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 00000000000..05841092a7b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, especially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues.
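+// As a hedged usage sketch (hypothetical caller code, not part of this +// package): +// +// client := cleanhttp.DefaultClient() +// resp, err := client.Get("https://example.com/") +//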
This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 00000000000..310f07569fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 00000000000..3c845dc0dc6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml deleted file mode 100644 index 304a8359558..00000000000 --- a/vendor/github.com/hashicorp/go-multierror/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - -branches: - only: - - master - -script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index ead5830f7b7..71dd308ed81 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,10 +1,11 @@ # go-multierror -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) 
-[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. @@ -14,16 +15,35 @@ be a list of errors. If the caller knows this, they can unwrap the list and access the errors. If the caller doesn't know, the error formats to a nice human-readable format. -`go-multierror` implements the -[errwrap](https://github.com/hashicorp/errwrap) interface so that it can -be used with that library, as well. +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. ## Installation and Docs Install using `go get github.com/hashicorp/go-multierror`. Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. + +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` ## Usage @@ -81,6 +101,39 @@ if err := something(); err != nil { } ``` +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + **Returning a multierror only if there are errors** If you build a `multierror.Error`, you can use the `ErrorOrNil` function diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go index 775b6e753e7..3e2589bfde0 100644 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -6,6 +6,8 @@ package multierror // If err is not a multierror.Error, then it will be turned into // one. 
If any of the errs are multierr.Error, they will be flattened // one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. func Append(err error, errs ...error) *Error { switch err := err.(type) { case *Error: diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod index 2534331d5f9..141cc4ccb25 100644 --- a/vendor/github.com/hashicorp/go-multierror/go.mod +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -1,3 +1,5 @@ module github.com/hashicorp/go-multierror +go 1.13 + require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum index 85b1f8ff333..e8238e9ec91 100644 --- a/vendor/github.com/hashicorp/go-multierror/go.sum +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -1,4 +1,2 @@ -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 00000000000..9c29efb7f87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index 89b1422d1d1..f5457432646 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -1,6 +1,7 @@ package multierror import ( + "errors" "fmt" ) @@ -39,13 +40,82 @@ func (e *Error) GoString() string { return fmt.Sprintf("*%#v", *e) } -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. // -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. +// This method is not safe to be called concurrently. 
Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } return e.Errors } + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 00000000000..4e309e0b326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml new file mode 100644 index 00000000000..c4fb6d6c8bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.12.4 + +branches: + only: + - master + +script: make updatedeps test diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
"Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 00000000000..da17640e644 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... + +.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md new file mode 100644 index 00000000000..30357c75668 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -0,0 +1,61 @@ +go-retryablehttp +================ + +[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-retryablehttp +[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp + +The `retryablehttp` package provides a familiar HTTP client interface with +automatic retries and exponential backoff. It is a thin wrapper over the +standard `net/http` client library and exposes nearly the same public API. This +makes `retryablehttp` very easy to drop into existing programs. + +`retryablehttp` performs automatic retries under certain conditions. Mainly, if +an error is returned by the client (connection errors, etc.), or if a 500-range +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. + +The main difference from `net/http` is that requests which take a request body +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. + +Version 0.6.0 and before are compatible with Go prior to 1.12. 
From 0.6.1 onward, Go 1.12+ is required.
+
+Example Use
+===========
+
+Using this library should look almost identical to what you would do with
+`net/http`. The simplest example of a GET request is shown below:
+
+```go
+resp, err := retryablehttp.Get("/foo")
+if err != nil {
+    panic(err)
+}
+```
+
+The returned response object is an `*http.Response`, the same thing you would
+usually get from `net/http`. If the request fails one or more times, the
+call above blocks and retries with exponential backoff.
+
+## Getting a stdlib `*http.Client` with retries
+
+It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
+This makes retryablehttp broadly applicable with minimal effort. Simply
+configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`:
+
+```go
+retryClient := retryablehttp.NewClient()
+retryClient.RetryMax = 10
+
+standardClient := retryClient.StandardClient() // *http.Client
+```
+
+For more usage and examples see the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
new file mode 100644
index 00000000000..f1ccd3df35c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -0,0 +1,705 @@
+// Package retryablehttp provides a familiar HTTP client interface with
+// automatic retries and exponential backoff. It is a thin wrapper over the
+// standard net/http client library and exposes nearly the same public API.
+// This makes retryablehttp very easy to drop into existing programs.
+//
+// retryablehttp performs automatic retries under certain conditions. Mainly, if
+// an error is returned by the client (connection errors, etc.), or if a 500-range
+// response is received, then a retry is invoked. Otherwise, the response is
+// returned and left to the caller to interpret.
+//
+// Requests which take a request body should provide a non-nil function
+// parameter. The best choice is to provide either a function satisfying
+// ReaderFunc which provides multiple io.Readers in an efficient manner, a
+// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
+// slice. As it is a reference type, and we will wrap it as needed by readers,
+// we can efficiently re-use the request body without needing to copy it. If an
+// io.Reader (such as a *bytes.Reader) is provided, the full body will be read
+// prior to the first request, and will be efficiently re-used for any retries.
+// ReadSeeker can be used, but some users have observed occasional data races
+// between the net/http library and the Seek functionality of some
+// implementations of ReadSeeker, so it should be avoided if possible.
+package retryablehttp
+
+import (
+	"bytes"
+	"context"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"os"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+var (
+	// Default retry configuration
+	defaultRetryWaitMin = 1 * time.Second
+	defaultRetryWaitMax = 30 * time.Second
+	defaultRetryMax     = 4
+
+	// defaultLogger is the logger provided with defaultClient
+	defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+
+	// defaultClient is used for performing requests without explicitly making
+	// a new client. It is purposely private to avoid modifications.
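As a hedged illustration of the body-rewinding behavior the package comment above describes, a `[]byte` body can be re-read on every attempt, so retries are safe; the URL and payload below are placeholders, not part of the library:

```go
package main

import (
	"fmt"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// A []byte body yields a fresh reader per attempt, letting the
	// client rewind the request on retry.
	req, err := retryablehttp.NewRequest("POST", "https://example.com/api", []byte(`{"ping":true}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := retryablehttp.NewClient().Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```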
+ defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + r.Request = r.Request.WithContext(ctx) + return r +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. 
+ case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. + case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + } + return bodyReader, contentLength, nil +} + +// FromRequest wraps an http.Request in a retryablehttp.Request +func FromRequest(r *http.Request) (*Request, error) { + bodyReader, _, err := getBodyReaderAndContentLength(r.Body) + if err != nil { + return nil, err + } + // Could assert contentLength == r.ContentLength + return &Request{bodyReader, r}, nil +} + +// NewRequest creates a new wrapped request. +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return nil, err + } + + httpReq, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + httpReq.ContentLength = contentLength + + return &Request{bodyReader, httpReq}, nil +} + +// Logger interface allows to use other loggers than +// standard log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// LeveledLogger interface implements the basic methods that a logger library needs +type LeveledLogger interface { + Error(string, ...interface{}) + Info(string, ...interface{}) + Debug(string, ...interface{}) + Warn(string, ...interface{}) +} + +// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions +// without changing the API. +type hookLogger struct { + LeveledLogger +} + +func (h hookLogger) Printf(s string, args ...interface{}) { + h.Info(fmt.Sprintf(s, args...)) +} + +// RequestLogHook allows a function to run before each retry. 
The HTTP
+// request which will be made, and the retry number (0 for the initial
+// request) are available to users. The internal logger is exposed to
+// consumers.
+type RequestLogHook func(Logger, *http.Request, int)
+
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckRetry callback to properly close any
+// response body before returning.
+type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
+
+// Backoff specifies a policy for how long to wait between retries.
+// It is called after a failing request to determine the amount of time
+// that should pass before trying again.
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
+
+// ErrorHandler is called if retries are exhausted, containing the last status
+// from the http library. If not specified, default behavior for the library is
+// to close the body and return an error indicating how many tries were
+// attempted. If overriding this, be sure to close the body if needed.
+type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
+
+// Client is used to make HTTP requests. It adds additional functionality
+// like automatic retries to tolerate minor outages.
+type Client struct {
+	HTTPClient *http.Client // Internal HTTP client.
+	Logger     interface{}  // Custom logger instance. Can be either Logger or LeveledLogger
+
+	RetryWaitMin time.Duration // Minimum time to wait
+	RetryWaitMax time.Duration // Maximum time to wait
+	RetryMax     int           // Maximum number of retries
+
+	// RequestLogHook allows a user-supplied function to be called
+	// before each retry.
+	RequestLogHook RequestLogHook
+
+	// ResponseLogHook allows a user-supplied function to be called
+	// with the response from each HTTP request executed.
+	ResponseLogHook ResponseLogHook
+
+	// CheckRetry specifies the policy for handling retries, and is called
+	// after each request. The default policy is DefaultRetryPolicy.
+	CheckRetry CheckRetry
+
+	// Backoff specifies the policy for how long to wait between retries
+	Backoff Backoff
+
+	// ErrorHandler specifies the custom error handler to use, if any
+	ErrorHandler ErrorHandler
+
+	loggerInit sync.Once
+}
+
+// NewClient creates a new Client with default settings.
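A short sketch of how the `CheckRetry` and `RetryMax` fields defined above compose; the 429 handling and URL are illustrative assumptions of this sketch, not library defaults:

```go
package main

import (
	"context"
	"net/http"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()
	client.RetryMax = 2

	// Defer to DefaultRetryPolicy, but also retry on HTTP 429.
	client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) {
		if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
			return true, nil
		}
		return retryablehttp.DefaultRetryPolicy(ctx, resp, err)
	}

	if _, err := client.Get("https://example.com/"); err != nil {
		panic(err)
	}
}
```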
+func NewClient() *Client { + return &Client{ + HTTPClient: cleanhttp.DefaultPooledClient(), + Logger: defaultLogger, + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, + } +} + +func (c *Client) logger() interface{} { + c.loggerInit.Do(func() { + if c.Logger == nil { + return + } + + switch c.Logger.(type) { + case Logger, LeveledLogger: + // ok + default: + // This should happen in dev when they are setting Logger and work on code, not in prod. + panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger)) + } + }) + + return c.Logger +} + +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to an invalid protocol scheme. + if schemeErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + return true, nil + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) 
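`LinearJitterBackoff`, documented above, can be swapped in for the default exponential backoff; a minimal sketch, assuming placeholder wait bounds and URL:

```go
package main

import (
	"time"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()

	// Per the doc comment above, RetryWaitMin/Max bound the jitter:
	// each wait is roughly attempt * rand(500ms, 1500ms).
	client.Backoff = retryablehttp.LinearJitterBackoff
	client.RetryWaitMin = 500 * time.Millisecond
	client.RetryWaitMax = 1500 * time.Millisecond

	if _, err := client.Get("https://example.com/"); err != nil {
		panic(err)
	}
}
```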
+func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + } + } + + var resp *http.Response + var err error + + for i := 0; ; i++ { + var code int // HTTP response code + + // Always rewind the request body when non-nil. + if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case Logger: + c.RequestLogHook(v, req.Request, i) + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, err = c.HTTPClient.Do(req.Request) + if resp != nil { + code = resp.StatusCode + } + + // Check if we should continue with retries. + checkOK, checkErr := c.CheckRetry(req.Context(), resp, err) + + if err != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case Logger: + c.ResponseLogHook(v, resp) + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + // Now decide if we should continue. + if !checkOK { + if checkErr != nil { + err = checkErr + } + c.HTTPClient.CloseIdleConnections() + return resp, err + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. 
+ if err == nil && resp != nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if code > 0 { + desc = fmt.Sprintf("%s (status: %d)", desc, code) + } + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + } + } + select { + case <-req.Context().Done(): + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-time.After(wait): + } + } + + if c.ErrorHandler != nil { + c.HTTPClient.CloseIdleConnections() + return c.ErrorHandler(resp, err, c.RetryMax+1) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + resp.Body.Close() + } + c.HTTPClient.CloseIdleConnections() + return nil, fmt.Errorf("%s %s giving up after %d attempts", + req.Method, req.URL, c.RetryMax+1) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + case LeveledLogger: + v.Error("error reading response body", "error", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. 
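The package-level helpers above (`Get`, `Head`, `Post`, `PostForm`) share the private default client, so retries come with no setup; a minimal `PostForm` sketch with an invented URL and form values:

```go
package main

import (
	"fmt"
	"net/url"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// Form values are encoded and posted with the default retry policy.
	resp, err := retryablehttp.PostForm("https://example.com/login", url.Values{
		"user": {"alice"}, // placeholder value for illustration
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```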
+func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod new file mode 100644 index 00000000000..7cc02b76fa4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.mod @@ -0,0 +1,8 @@ +module github.com/hashicorp/go-retryablehttp + +require ( + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-hclog v0.9.2 +) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum new file mode 100644 index 00000000000..71afe568227 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 00000000000..b841b4cfe53 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,43 @@ +package retryablehttp + +import ( + "net/http" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. + retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + return rt.Client.Do(retryableReq) +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 00000000000..be2cc4dfb60 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
"Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 00000000000..a86c8539e06 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,177 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. 
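+// A successful lookup also promotes the entry to most-recently-used.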
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 00000000000..92d70934d63 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,39 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. 
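+ // #value, isFound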
+ Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index dad29725f86..d324c43ba4d 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 02fc81e0626..aa8cbd7ce6d 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,44 +1,54 @@ # Mergo -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). [![GoDoc][3]][4] -[![GoCard][5]][6] +[![GitHub release][5]][6] +[![GoCard][7]][8] [![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] + +[![GoCenter Kudos][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo +[16]: https://search.gocenter.io/github.com/imdario/mergo -### Latest release +A helper to merge 
structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will merge any exported field recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for Go modules. -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) @@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) -## Installation +## Install go get github.com/imdario/mergo @@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one.
It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will merge any exported field recursively. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example +Here is a nice example: ```go package main @@ -175,10 +184,10 @@ import ( "time" ) -type timeTransfomer struct { +type timeTransformer struct { } -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { @@ -202,7 +211,7 @@ type Snapshot struct { func main() { src := Snapshot{time.Now()} dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go index 6e9aa7baf35..fcd985f995d 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/github.com/imdario/mergo/doc.go @@ -4,41 +4,140 @@ // license that can be found in the LICENSE file. /* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will merge any exported field recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +Important note + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for Go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo.
I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) Usage -From my own work-in-progress project: +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will merge any exported field recursively. It won't merge empty struct values, as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the option WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 + type Foo struct { + A string + B int64 } - type FssnConfig struct { - Network networkConfig + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} } - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, +Transformers + +Transformers allow specific types to be merged differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value, but IsZero can return true because it has fields with zero values. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { } - // Inside a function [...] + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } - // More code [...] +Contact me + +If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as the Go language.
*/ package mergo diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod new file mode 100644 index 00000000000..3d689d93eb3 --- /dev/null +++ b/vendor/github.com/imdario/mergo/go.mod @@ -0,0 +1,5 @@ +module github.com/imdario/mergo + +go 1.13 + +require gopkg.in/yaml.v2 v2.3.0 diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum new file mode 100644 index 00000000000..168980da5f7 --- /dev/null +++ b/vendor/github.com/imdario/mergo/go.sum @@ -0,0 +1,4 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index d83258b4dda..a13a7ee46c7 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf continue } if srcKind == dstKind { - if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if srcKind == reflect.Map { @@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -157,8 +160,7 @@ func _map(dst, src interface{}, opts ...func(*Config)) error { // To be friction-less, we redirect equal-type arguments // to deepMerge. Only because arguments can be anything. 
if vSrc.Kind() == vDst.Kind() { - _, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - return err + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } switch vSrc.Kind() { case reflect.Struct: diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 3332c9c2a7a..8c2a8fcd901 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -11,26 +11,26 @@ package mergo import ( "fmt" "reflect" - "unsafe" ) -func hasExportedField(dst reflect.Value) (exported bool) { +func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) - if isExportedComponent(&field) { - return true + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 } } return } func isExportedComponent(field *reflect.StructField) bool { - name := field.Name pkgPath := field.PkgPath if len(pkgPath) > 0 { return false } - c := name[0] + c := field.Name[0] if 'a' <= c && c <= 'z' || c == '_' { return false } @@ -44,6 +44,8 @@ type Config struct { Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool } type Transformers interface { @@ -53,17 +55,16 @@ type Transformers interface { // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) { - dst = dstIn +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite typeCheck := config.TypeCheck overwriteWithEmptySrc := config.overwriteWithEmptyValue overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return } - if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr @@ -71,7 +72,7 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, typ := dst.Type() for p := seen; p != nil; p = p.next { if p.ptr == addr && p.typ == typ { - return dst, nil + return nil } } // Remember, remember... 
@@ -85,50 +86,19 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, } } - if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() { - err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind()) - return - } - switch dst.Kind() { case reflect.Struct: - if hasExportedField(dst) { - dstCp := reflect.New(dst.Type()).Elem() + if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { - dstField := dst.Field(i) - structField := dst.Type().Field(i) - // copy un-exported struct fields - if !isExportedComponent(&structField) { - rf := dstCp.Field(i) - rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec - dstRF := dst.Field(i) - if !dst.Field(i).CanAddr() { - continue - } - - dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec - rf.Set(dstRF) - continue - } - dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config) - if err != nil { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } - dstCp.Field(i).Set(dstField) - } - - if dst.CanSet() { - dst.Set(dstCp) - } else { - dst = dstCp } - return } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { - dst = src + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + dst.Set(src) } } - case reflect.Map: if dst.IsNil() && !src.IsNil() { if dst.CanSet() { @@ -138,73 +108,137 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, return } } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) - dstElement := dst.MapIndex(key) if !srcElement.IsValid() { continue } - if dst.MapIndex(key).IsValid() { - k := dstElement.Interface() - dstElement = reflect.ValueOf(k) - } - if isReflectNil(srcElement) { - if overwrite || isReflectNil(dstElement) { - dst.SetMapIndex(key, srcElement) + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if 
srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) } - continue } - if !srcElement.CanInterface() { + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - if dstElement.IsValid() { - dstElement = reflect.ValueOf(dstElement.Interface()) + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) } + dst.SetMapIndex(key, srcElement) } - dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config) - if err != nil { - return - } - dst.SetMapIndex(key, dstElement) - } case reflect.Slice: - newSlice := dst - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - if typeCheck && src.Type() != dst.Type() { - return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type()) - } - newSlice = src - } else if config.AppendSlice { - if typeCheck && src.Type() != dst.Type() { - err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - return - } - newSlice = reflect.AppendSlice(dst, src) - } - if dst.CanSet() { - dst.Set(newSlice) - } else { - dst = newSlice - } - case reflect.Ptr, reflect.Interface: - if isReflectNil(src) { + if !dst.CanSet() { break } + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } - if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { - if dst.IsNil() || overwrite { - if overwrite || isEmptyValue(dst) { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return } } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } break } @@ -214,33 +248,35 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, 
depth int, dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } - dst = dst.Addr() } else if dst.Elem().Type() == src.Type() { - if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { return } } else { - return dst, ErrDifferentArgumentsTypes + return ErrDifferentArgumentsTypes } break } + if dst.IsNil() || overwrite { - if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) } - } else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break } default: - overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) - if overwriteFull { + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + if mustSet { if dst.CanSet() { dst.Set(src) } else { @@ -281,6 +317,7 @@ func WithOverride(config *Config) { // WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true config.overwriteWithEmptyValue = true } @@ -299,7 +336,16 @@ func WithTypeCheck(config *Config) { config.TypeCheck = true } +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -314,14 +360,10 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } - if !vDst.CanSet() { - return fmt.Errorf("cannot set dst, needs reference") - } if vDst.Type() != vSrc.Type() { return ErrDifferentArgumentsTypes } - _, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - return err + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } // IsReflectNil is the reflect value provided nil diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index a82fea2fdcc..3cc926c7f62 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,6 +20,7 @@ var ( ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } - -// Traverses recursively both values, assigning src's fields values to dst. 
-// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 00000000000..5091fb0736c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 00000000000..c56f37c0c94 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,28 @@ +language: go + +sudo: false + +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - tip + +allow_failures: + - go: tip + +script: make build + +matrix: + include: + - language: go + go: 1.15.x + script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 00000000000..b03310a91fd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 00000000000..fb38ec2760e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,51 @@ + +CMD = jpgo + +SRC_PKGS=./ ./cmd/... ./fuzz/... + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ${SRC_PKGS} + +build: + rm -f $(CMD) + go build ${SRC_PKGS} + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: test-internal-testify + echo "making tests ${SRC_PKGS}" + go test -v ${SRC_PKGS} + +check: + go vet ${SRC_PKGS} + @echo "golint ${SRC_PKGS}" + @lint=`golint ${SRC_PKGS}`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench .
-cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out + +test-internal-testify: + cd internal/testify && go test ./... + diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 00000000000..110ad799976 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,87 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + +go-jmespath is a Go implementation of JMESPath, +which is a query language for JSON. It will take a JSON +document and transform it into another JSON document +through a JMESPath expression. + +Using go-jmespath is really easy. There's a single function +you use, `jmespath.Search`: + + +```go +> import "github.com/jmespath/go-jmespath" +> +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar.baz[2]", data) +result = 2 +``` + +In the example we gave the ``Search`` function input data of +`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath +expression `foo.bar.baz[2]`, and the `Search` function evaluated +the expression against the input data to produce the result ``2``. + +The JMESPath language can do a lot more than select an element +from a list. Here are a few more examples: + +```go +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar", data) +result = { "baz": [ 0, 1, 2, 3, 4 ] } + + +> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, + {"first": "c", "last": "d"}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo[*].first", data) +result = [ 'a', 'c' ] + + +> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, + {"age": 30}, {"age": 35}, + {"age": 40}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo[?age > `30`]", data) +result = [ { age: 35 }, { age: 40 } ] +``` + +You can also pre-compile your query. This is useful if +you are going to run multiple searches with it (a panicking variant, +`MustCompile`, is sketched at the end of this README): + +```go + > var jsondata = []byte(`{"foo": "bar"}`) + > var data interface{} + > err := json.Unmarshal(jsondata, &data) + > precompiled, err := Compile("foo") + > if err != nil{ + > // ... handle the error + > } + > result, err := precompiled.Search(data) + result = "bar" +``` + +## More Resources + +The examples above only show a small amount of what +a JMESPath expression can do. If you want to take a +tour of the language, the *best* place to go is the +[JMESPath Tutorial](http://jmespath.org/tutorial.html). + +One of the best things about JMESPath is that it is +implemented in many different programming languages including +Python, Ruby, PHP, Lua, etc. To see a complete list of libraries, +check out the [JMESPath libraries page](http://jmespath.org/libraries.html). + +And finally, the full JMESPath specification can be found +on the [JMESPath site](http://jmespath.org/specification.html).
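+ +The package also provides `MustCompile`, which panics if the expression cannot be parsed; it is handy for initializing package-level queries. A minimal sketch (the expression, variable, and helper names here are only illustrative): + +```go +import "github.com/jmespath/go-jmespath" + +// firstNames is compiled once; MustCompile panics on an invalid expression. +var firstNames = jmespath.MustCompile("foo[*].first") + +func firstNamesOf(data interface{}) (interface{}, error) { + // A compiled JMESPath is safe for concurrent use by multiple goroutines. + return firstNames.Search(data) +} +```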
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 00000000000..010efe9bfba --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JMESPath is the representation of a compiled JMES path query. A JMESPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 00000000000..1cd2d239c96 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 00000000000..9b7cd89b4bc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + 
jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "map", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types:
[]jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + 
return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
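The dispatch path above (CallFunction looks up the table entry, resolveArgs enforces arity and argument types, then the jpf* handler runs) is what services every builtin call in an expression. A minimal sketch of how that surfaces through the package's public API; only jmespath.Search from this vendored library is assumed:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{"words": []interface{}{"b", "a", "c"}}

	// sort() and length() resolve through the functionTable above;
	// resolveArgs type-checks each argument before the handler runs.
	out, err := jmespath.Search("length(sort(words))", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 3 (JMESPath numbers are float64)

	// abs() declares jpNumber, so handing it an array fails in
	// argSpec.typeCheck rather than panicking inside the handler.
	if _, err := jmespath.Search("abs(words)", data); err != nil {
		fmt.Println("rejected:", err)
	}
}
```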
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current <
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string.
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod new file mode 100644 index 00000000000..4d448e88b06 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.mod @@ -0,0 +1,5 @@ +module github.com/jmespath/go-jmespath + +go 1.14 + +require github.com/jmespath/go-jmespath/internal/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum new file mode 100644 index 00000000000..d2db411e585 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 00000000000..13c74604c2c --- 
/dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". +func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) 
+ } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. + rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + 
sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) 
+ elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 00000000000..817900c8f52 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. 
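Taken together, the reflection helpers above and the interpreter's projection cases make both decoded JSON and native Go structs searchable, while lex and parse failures come back as the SyntaxError type just defined. A hedged usage sketch; only exported names visible in this vendored copy (Search, Compile, SyntaxError, HighlightLocation) are used:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// ASTFilterProjection at work on decoded JSON.
	data := map[string]interface{}{"jobs": []interface{}{
		map[string]interface{}{"name": "unit", "ok": true},
		map[string]interface{}{"name": "e2e", "ok": false},
	}}
	passing, _ := jmespath.Search("jobs[?ok].name", data)
	fmt.Println(passing) // [unit]

	// fieldFromStruct title-cases the key, so exported struct
	// fields are reachable too.
	type cluster struct{ Zone string }
	zone, _ := jmespath.Search("zone", cluster{Zone: "us-central1-f"})
	fmt.Println(zone) // us-central1-f

	// Lex/parse failures surface as SyntaxError values whose
	// HighlightLocation marks the offending offset.
	if _, err := jmespath.Compile("foo.[bar"); err != nil {
		if se, ok := err.(jmespath.SyntaxError); ok {
			fmt.Println(se.HighlightLocation())
		}
	}
}
```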
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can be reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 00000000000..4abc303ab4a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. 
+type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
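Parser here is a classic Pratt parser: the bindingPowers table above decides how far parseExpression (below) extends to the right, with nud handling tokens in prefix position and led handling infix tokens. Because NewParser, Parse, and ASTNode's printer are exported in this file, the resulting tree can be inspected directly; a small sketch using only names defined in this vendored source:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	// tDot (40) binds much tighter than tPipe (1), so this parses as
	// (foo.bar) | [0] rather than foo.(bar | [0]).
	ast, err := parser.Parse("foo.bar | [0]")
	if err != nil {
		panic(err)
	}
	fmt.Println(ast) // ASTNode.String() pretty-prints the tree
}
```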
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expression: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 00000000000..dae79cbdf33 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 00000000000..ddc1b7d7d46 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 00000000000..955dc0be5fa --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 00000000000..15556530a85 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 00000000000..449e67cd01a --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000000..c8a9fbb3871 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000000..313a0f887b6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000000..2cf4f5ab28e --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
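The go-jmespath slice helpers vendored above (`slice`, `computeSliceParams`, `capSlice`) implement Python-style `[start:stop:step]` semantics, including negative indexes and negative steps. A minimal sketch of how those semantics surface through the public API, assuming upstream's `jmespath.Search` and purely illustrative data:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"letters": []interface{}{"a", "b", "c", "d"},
	}

	// [1:3] keeps indexes 1 and 2 (stop is exclusive, as in Python).
	mid, _ := jmespath.Search("letters[1:3]", data)
	fmt.Println(mid) // [b c]

	// A negative step walks backwards; an omitted start/stop defaults to
	// the ends, so [::-1] reverses the list.
	rev, _ := jmespath.Search("letters[::-1]", data)
	fmt.Println(rev) // [d c b a]
}
```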
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 00000000000..52b111d5f36 --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,87 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 00000000000..92d2cc4a3dd --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
+func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 00000000000..f6b8aeab0a1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 00000000000..0449e9aa428 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := 
any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 00000000000..9452324af5b --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 00000000000..35fdb09497f --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return 
int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 00000000000..1b56f399150 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 00000000000..c440d72b6d3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() 
*Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 00000000000..1d859eac327 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 00000000000..d04cb54c11c --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 00000000000..9d1e901a66a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return 
any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 00000000000..c44ef5c989a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} 
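For orientation, the lazy `Any` lookup machinery above (`locatePath`, the `Get` methods, and `newInvalidAny`) is what backs the package-level `jsoniter.Get` path API. A minimal sketch of how it is typically consumed; the payload and variable names here are illustrative only:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"users":[{"name":"alice"},{"name":"bob"}]}`)

	// Path elements may be strings (object fields), ints (array indexes),
	// or the rune '*' (an int32), which fans out over every element.
	one := jsoniter.Get(data, "users", 0, "name")
	fmt.Println(one.ToString()) // alice

	all := jsoniter.Get(data, "users", '*', "name")
	fmt.Println(all.ToString()) // ["alice","bob"]

	// Get itself never panics; a missing path yields an invalid Any whose
	// failure is reported via LastError (or a panic from MustBeValid).
	missing := jsoniter.Get(data, "users", 5, "name")
	fmt.Println(missing.LastError() != nil) // true
}
```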
+ +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := 
Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 00000000000..1f12f6612de --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 00000000000..656bbd33d7e --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 00000000000..7df2fce33ba --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 00000000000..b45ef688313 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 00000000000..2adcdc3b790 --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := 
cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type 
lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 
0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 00000000000..3095662b061 --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true<br>negative => true<br>zero => false| 23.2 => 23<br>-32.1 => -32| 12.1 => 12<br>-12.1 => 0|as normal|same as origin|
+| string | empty string => false<br>string "0" => false<br>other strings => true | "123.32" => 123<br>"-123.4" => -123<br>"123.23xxxw" => 123<br>"abcde12" => 0<br>"-32.1" => -32| 13.2 => 13<br>-1.1 => 0 |12.1 => 12.1<br>-12.3 => -12.3<br>12.4xxa => 12.4<br>+1.1e2 => 110 |same as origin|
+| bool | true => true<br>false => false| true => 1<br>false => 0 | true => 1<br>false => 0 |true => 1<br>false => 0|true => "true"<br>false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false<br>nonempty array => true| [] => 0<br>[1,2] => 1 | [] => 0<br>[1,2] => 1 |[] => 0<br>[1,2] => 1|original json|
\ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 00000000000..e05c42ff58b --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 00000000000..be00a6df969 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,15 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 00000000000..29b31cf7895 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue +
valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is an io.Reader-like object with JSON-specific read functions. +// Errors are not returned as return values; they are stored in the Error field of the iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from an io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from a byte slice +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from a string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool that can provide more iterators with the same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuses the iterator instance by specifying another reader as input +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuses the iterator instance by specifying another byte slice as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext returns the ValueType of the next JSON element without consuming it +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError records an error on the iterator instance, tagged with the current position.
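+// In practice callers check iter.Error after reading, treating io.EOF as
+// ordinary end of input. A minimal usage sketch (ConfigDefault is the
+// package's default frozen config; the input here is deliberately malformed):
+//
+//	iter := ParseString(ConfigDefault, `{"ok": truX}`)
+//	iter.ReadObject()
+//	iter.ReadBool() // fails and calls ReportError internally
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		fmt.Println(iter.Error) // names the failing operation, byte offset, and surrounding context
+//	}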
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets the current buffer as a string, for debugging purposes +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read reads the next JSON element as a generic interface{}.
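+// A usage sketch (assuming ConfigDefault): objects decode to
+// map[string]interface{}, arrays to []interface{}, and numbers to float64,
+// or json.Number when UseNumber is set, exactly as the switch below encodes:
+//
+//	iter := ParseString(ConfigDefault, `{"n": 1.5, "xs": [1, 2]}`)
+//	v := iter.Read()
+//	m := v.(map[string]interface{}) // m["n"] == 1.5; m["xs"] is []interface{}{1.0, 2.0}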
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 00000000000..204fe0e0922 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray reads one array element and reports whether the array has more elements to read.
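+// The returned bool drives the read loop; a typical caller (a sketch, using
+// the ParseString constructor and ConfigDefault from this package) looks like:
+//
+//	iter := ParseString(ConfigDefault, `[1, 2, 3]`)
+//	var nums []int
+//	for iter.ReadArray() {
+//		nums = append(nums, iter.ReadInt())
+//	}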
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 00000000000..8a3d8b6fb43 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,342 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty 
number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = 
iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 00000000000..d786a89fe1a --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + 
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = 
value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + 
iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 00000000000..58ee89c849e --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = 
iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git 
a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 00000000000..e91eefb15be --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) 
+} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 00000000000..9303de41e40 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + 
return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 00000000000..6cf66d0438d --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + 
return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 00000000000..adc487ea804 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
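+// In other words, the fast path below returns a sub-slice of the iterator's
+// internal buffer, so callers that keep the bytes must copy them first (sketch):
+//
+//	b := iter.ReadStringAsSlice()
+//	kept := append([]byte(nil), b...) // safe to retain after further reads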
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 00000000000..c2934f916eb --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 00000000000..e2389b56cff --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 00000000000..39acb320ace --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
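+// Shape of a minimal implementation (a sketch only; upperStringEncoder is a
+// hypothetical name, and custom encoders are normally wired in through the
+// extension API rather than instantiated directly):
+//
+//	type upperStringEncoder struct{}
+//
+//	func (e *upperStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+//		return *(*string)(ptr) == ""
+//	}
+//
+//	func (e *upperStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+//		stream.WriteString(strings.ToUpper(*(*string)(ptr)))
+//	}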
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ == nil || typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if 
decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder 
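Note that the `default` branches above return `lazyErrorDecoder`/`lazyErrorEncoder` rather than failing eagerly: an unsupported kind only surfaces as an error when a value of that type is actually encoded or decoded, matching encoding/json's behavior. A small illustration (the struct name is hypothetical and the exact error text is an implementation detail):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type withChan struct {
	Name string
	C    chan int // unsupported kind: handled by a lazyErrorEncoder
}

func main() {
	// Building the struct encoder succeeds; the error appears only on use.
	_, err := jsoniter.Marshal(withChan{Name: "x", C: make(chan int)})
	fmt.Println(err != nil) // true
}
```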
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 00000000000..13a0b7b0878 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = 
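A note on the fixed-size array decoder above: it writes only the indexes it actually reads, skipping surplus JSON elements and leaving trailing Go elements untouched. For example (a sketch grounded in the `doDecode` loop above):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var a [2]int
	_ = jsoniter.Unmarshal([]byte(`[1,2,3]`), &a) // third element is skipped
	fmt.Println(a)                                // [1 2]

	a = [2]int{9, 9}
	_ = jsoniter.Unmarshal([]byte(`[7]`), &a) // a[1] is left as-is
	fmt.Println(a)                            // [7 9]
}
```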
arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 00000000000..8b6bc8b4332 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 00000000000..74a97bfe5ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
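Since the `Extension` interface defined next is the library's main customization point, a minimal sketch of an extension that renames fields via `UpdateStructDescriptor` may help; the extension type itself is hypothetical, but it embeds `DummyExtension` (defined further down in this file) and is registered with `RegisterExtension`:

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// upperCaseExt upper-cases every JSON field name, for illustration only.
type upperCaseExt struct {
	jsoniter.DummyExtension // supplies no-op defaults for the other methods
}

func (e *upperCaseExt) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
	for _, binding := range sd.Fields {
		name := strings.ToUpper(binding.Field.Name())
		binding.ToNames = []string{name}   // names written on encode
		binding.FromNames = []string{name} // names accepted on decode
	}
}

func main() {
	jsoniter.RegisterExtension(&upperCaseExt{})
	out, _ := jsoniter.Marshal(struct{ Hello string }{"world"})
	fmt.Println(string(out)) // {"HELLO":"world"}
}
```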
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
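The `Register*Func` helpers above key codecs by the reflect2 type string. A sketch of registering a type-level encoder, assuming the usual "time.Time" name reported by reflect2 (the unix-seconds format is an arbitrary example, not anything this diff prescribes):

```go
package main

import (
	"fmt"
	"time"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

func init() {
	jsoniter.RegisterTypeEncoderFunc("time.Time",
		func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
			stream.WriteInt64((*time.Time)(ptr).Unix()) // emit unix seconds
		},
		func(ptr unsafe.Pointer) bool {
			return (*time.Time)(ptr).IsZero() // consulted for omitempty
		})
}

func main() {
	out, _ := jsoniter.Marshal(struct{ At time.Time }{time.Unix(42, 0)})
	fmt.Println(string(out)) // {"At":42}
}
```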
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
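`processTags` and `calcFieldNames` together implement the familiar struct-tag grammar. A summary sketch under the default TagKey of "json" (the `user` type is hypothetical):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type user struct {
	ID     int    `json:"id,string"`       // number encoded as a quoted string
	Name   string `json:"name"`            // plain rename
	Email  string `json:"email,omitempty"` // dropped when IsEmpty is true
	Note   string `json:"-"`               // always ignored
	hidden string // unexported: calcFieldNames yields no names
}

func main() {
	out, _ := jsoniter.Marshal(user{ID: 7, Name: "Ada", Note: "x", hidden: "y"})
	fmt.Println(string(out)) // {"id":"7","name":"Ada"}
}
```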
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 00000000000..98d45c1ec25 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go 
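These Number codecs pair with the `UseNumber` option: with it set, untyped decoding produces `json.Number` instead of `float64`, so large integers survive intact. For example:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.Config{UseNumber: true}.Froze()

	var v interface{}
	_ = api.Unmarshal([]byte(`{"id": 9007199254740993}`), &v)

	n := v.(map[string]interface{})["id"].(json.Number)
	i, _ := n.Int64()
	fmt.Println(i) // 9007199254740993, no float64 precision loss
}
```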
new file mode 100644 index 00000000000..eba434f2f16 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,76 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 00000000000..58296713013 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range 
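The RawMessage codecs above capture a sub-document verbatim (`SkipAndReturnBytes`) so it can be decoded later — the usual two-phase envelope pattern. A sketch with a hypothetical `envelope` type:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type envelope struct {
	Kind    string          `json:"kind"`
	Payload json.RawMessage `json:"payload"` // captured verbatim, decoded later
}

func main() {
	var env envelope
	_ = jsoniter.Unmarshal([]byte(`{"kind":"point","payload":{"x":1,"y":2}}`), &env)

	var pt struct{ X, Y int }
	_ = jsoniter.Unmarshal(env.Payload, &pt) // second pass over the raw bytes
	fmt.Println(env.Kind, pt)                // point {1 2}
}
```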
ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + 
decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + 
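Two behaviors of the map codecs here are worth calling out: non-string keys are wrapped in quotes by `numericMapKeyEncoder`, and `SortMapKeys` selects the `sortKeysMapEncoder` above for deterministic output. A sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	sorted := jsoniter.Config{SortMapKeys: true}.Froze()

	// Numeric keys go through numericMapKeyEncoder and come out quoted.
	out, _ := sorted.Marshal(map[int]string{2: "b", 1: "a"})
	fmt.Println(string(out)) // {"1":"a","2":"b"} — stable across runs

	// Without SortMapKeys, map iteration (and thus key order) is randomized.
	out, _ = jsoniter.Marshal(map[int]string{2: "b", 1: "a"})
	fmt.Println(string(out)) // key order not guaranteed
}
```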
encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 00000000000..3e21f375671 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + 
checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := 
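These marshaler adapters mean any type implementing the standard `json.Marshaler` or `encoding.TextMarshaler`/`TextUnmarshaler` interfaces round-trips through jsoniter just as it does through encoding/json. An illustrative TextMarshaler pair (the `Celsius` type and its "21.5C" format are invented for the example):

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// Celsius implements encoding.TextMarshaler/TextUnmarshaler, so it is
// encoded as a JSON string by the textMarshalerEncoder above.
type Celsius float64

func (c Celsius) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%.1fC", float64(c))), nil
}

func (c *Celsius) UnmarshalText(b []byte) error {
	_, err := fmt.Sscanf(strings.TrimSuffix(string(b), "C"), "%f", (*float64)(c))
	return err
}

func main() {
	out, _ := jsoniter.Marshal(map[string]Celsius{"t": 21.5})
	fmt.Println(string(out)) // {"t":"21.5C"}

	var m map[string]Celsius
	_ = jsoniter.Unmarshal(out, &m)
	fmt.Println(m["t"]) // 21.5
}
```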
(obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 00000000000..f88722d14d1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, 
typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func 
(codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool 
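One subtlety in `createEncoderOfNative`/`createDecoderOfNative` above: a defined type whose name differs from its kind (the `typeName !=` checks) is routed back to the codec for the underlying kind, so it serializes exactly like its base type. A sketch with a hypothetical `ID` type:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type ID uint64 // typeName "main.ID" != "uint64", so the uint64 codec is reused

func main() {
	out, _ := jsoniter.Marshal(map[string]ID{"id": 42})
	fmt.Println(string(out)) // {"id":42}
}
```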
{ + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 00000000000..fa71f474891 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type 
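The `base64Codec` above gives `[]byte` the same treatment as encoding/json: encoded as a base64 string, and accepted back as either a string or a plain JSON array. For example:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	out, _ := jsoniter.Marshal([]byte("hi"))
	fmt.Println(string(out)) // "aGk="

	var b []byte
	_ = jsoniter.Unmarshal([]byte(`"aGk="`), &b) // StringValue branch
	fmt.Printf("%s\n", b)                        // hi

	_ = jsoniter.Unmarshal([]byte(`[104,105]`), &b) // ArrayValue branch
	fmt.Printf("%s\n", b)                           // hi
}
```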
dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 00000000000..9441d79df33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := 
decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 00000000000..92ae912dc24 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1097 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder 
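+ // The unrolled decoders assembled in these switch cases (one through ten fields) all share
+ // one scheme: each known field name is hashed once when the decoder is built, and incoming
+ // keys are then matched by hash instead of by string comparison. A sketch of the resulting
+ // dispatch loop, using helpers defined in this package (the field name "price" is illustrative):
+ //
+ //	fieldHash := calcHash("price", ctx.caseSensitive()) // precomputed at decoder build time
+ //	for {
+ //		if iter.readFieldHash() == fieldHash {
+ //			fieldDecoder.Decode(ptr, iter) // known key: decode into the field
+ //		} else {
+ //			iter.Skip() // unknown key: skip its value
+ //		}
+ //		if iter.isObjectEnd() {
+ //			break
+ //		}
+ //	}
+ //
+ // If two field names ever collide on the same hash, createStructDecoder falls back to
+ // generalStructDecoder, which matches on the full field name.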
+ var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } 
else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = 
fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = 
fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder 
struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case 
decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder 
*eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + 
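+ // Each case in this switch dispatches directly to the decoder of the matching field; keys
+ // whose hash is unknown fall through to iter.Skip() in the default branch. As in the smaller
+ // variants above, the scan is bracketed by incrementDepth/decrementDepth so that deeply
+ // nested input aborts early instead of overflowing the stack.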
decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 00000000000..152e3ef5a93 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = 
append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type 
stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 00000000000..23d8a3ad6b1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "io" +) + +// Stream is an io.Writer-like object with JSON-specific write functions. +// Errors are not returned as return values, but stored in the Error field of the stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream creates a new Stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil to write only to the internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool that can provide more streams with the same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuses this stream instance by assigning a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer returns the internal buffer; if the writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows appending to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// writeByte appends a single byte to the buffer.
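+
+// A minimal usage sketch for Stream (ConfigDefault is defined elsewhere in this package,
+// outside this hunk; the field name "name" and the 512-byte buffer are illustrative):
+//
+//	stream := NewStream(ConfigDefault, os.Stdout, 512)
+//	stream.WriteObjectStart()
+//	stream.WriteObjectField("name")
+//	stream.WriteString("knative")
+//	stream.WriteObjectEnd()
+//	if err := stream.Flush(); err != nil {
+//		// handle the I/O error from the underlying writer
+//	}
+//
+// The WriteXxx methods only append to buf; Flush is what pushes the buffer to the underlying
+// io.Writer. writeByte and the other unexported helpers below append raw bytes to that buffer.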
+func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + _, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[:0] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) +} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 00000000000..826aa594ac6 --- /dev/null +++ 
b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 writes a float32 to the stream +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy writes a float32 to the stream with only 6 digits of precision; it is much faster than WriteFloat32 +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 10^precision + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 writes a float64 to the stream +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: plain float64 comparisons here; 'e' formatting is used outside [1e-6, 1e21) to match encoding/json's cutoffs.
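+ // For illustration, with strconv.AppendFloat's shortest (-1 precision) formatting
+ // (example values assumed):
+ //	5e-7   -> "5e-07"   (abs < 1e-6, so 'e' formatting)
+ //	123.45 -> "123.45"  (plain 'f' formatting)
+ //	1e21   -> "1e+21"   (abs >= 1e21, so 'e' formatting)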
+ if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 00000000000..d1059ee4c20 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// 
WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 00000000000..54c2ba0b3a2 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML